repo_name
string
path
string
copies
string
size
string
content
string
license
string
rektide/linux
drivers/input/misc/drv2667.c
547
12638
/* * DRV2667 haptics driver family * * Author: Dan Murphy <dmurphy@ti.com> * * Copyright: (C) 2014 Texas Instruments, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/i2c.h> #include <linux/input.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> /* Contol registers */ #define DRV2667_STATUS 0x00 #define DRV2667_CTRL_1 0x01 #define DRV2667_CTRL_2 0x02 /* Waveform sequencer */ #define DRV2667_WV_SEQ_0 0x03 #define DRV2667_WV_SEQ_1 0x04 #define DRV2667_WV_SEQ_2 0x05 #define DRV2667_WV_SEQ_3 0x06 #define DRV2667_WV_SEQ_4 0x07 #define DRV2667_WV_SEQ_5 0x08 #define DRV2667_WV_SEQ_6 0x09 #define DRV2667_WV_SEQ_7 0x0A #define DRV2667_FIFO 0x0B #define DRV2667_PAGE 0xFF #define DRV2667_MAX_REG DRV2667_PAGE #define DRV2667_PAGE_0 0x00 #define DRV2667_PAGE_1 0x01 #define DRV2667_PAGE_2 0x02 #define DRV2667_PAGE_3 0x03 #define DRV2667_PAGE_4 0x04 #define DRV2667_PAGE_5 0x05 #define DRV2667_PAGE_6 0x06 #define DRV2667_PAGE_7 0x07 #define DRV2667_PAGE_8 0x08 /* RAM fields */ #define DRV2667_RAM_HDR_SZ 0x0 /* RAM Header addresses */ #define DRV2667_RAM_START_HI 0x01 #define DRV2667_RAM_START_LO 0x02 #define DRV2667_RAM_STOP_HI 0x03 #define DRV2667_RAM_STOP_LO 0x04 #define DRV2667_RAM_REPEAT_CT 0x05 /* RAM data addresses */ #define DRV2667_RAM_AMP 0x06 #define DRV2667_RAM_FREQ 0x07 #define DRV2667_RAM_DURATION 0x08 #define DRV2667_RAM_ENVELOPE 0x09 /* Control 1 Register */ #define DRV2667_25_VPP_GAIN 0x00 #define DRV2667_50_VPP_GAIN 0x01 #define 
DRV2667_75_VPP_GAIN 0x02 #define DRV2667_100_VPP_GAIN 0x03 #define DRV2667_DIGITAL_IN 0xfc #define DRV2667_ANALOG_IN (1 << 2) /* Control 2 Register */ #define DRV2667_GO (1 << 0) #define DRV2667_STANDBY (1 << 6) #define DRV2667_DEV_RST (1 << 7) /* RAM Envelope settings */ #define DRV2667_NO_ENV 0x00 #define DRV2667_32_MS_ENV 0x01 #define DRV2667_64_MS_ENV 0x02 #define DRV2667_96_MS_ENV 0x03 #define DRV2667_128_MS_ENV 0x04 #define DRV2667_160_MS_ENV 0x05 #define DRV2667_192_MS_ENV 0x06 #define DRV2667_224_MS_ENV 0x07 #define DRV2667_256_MS_ENV 0x08 #define DRV2667_512_MS_ENV 0x09 #define DRV2667_768_MS_ENV 0x0a #define DRV2667_1024_MS_ENV 0x0b #define DRV2667_1280_MS_ENV 0x0c #define DRV2667_1536_MS_ENV 0x0d #define DRV2667_1792_MS_ENV 0x0e #define DRV2667_2048_MS_ENV 0x0f /** * struct drv2667_data - * @input_dev - Pointer to the input device * @client - Pointer to the I2C client * @regmap - Register map of the device * @work - Work item used to off load the enable/disable of the vibration * @regulator - Pointer to the regulator for the IC * @magnitude - Magnitude of the vibration event **/ struct drv2667_data { struct input_dev *input_dev; struct i2c_client *client; struct regmap *regmap; struct work_struct work; struct regulator *regulator; u32 page; u32 magnitude; u32 frequency; }; static struct reg_default drv2667_reg_defs[] = { { DRV2667_STATUS, 0x02 }, { DRV2667_CTRL_1, 0x28 }, { DRV2667_CTRL_2, 0x40 }, { DRV2667_WV_SEQ_0, 0x00 }, { DRV2667_WV_SEQ_1, 0x00 }, { DRV2667_WV_SEQ_2, 0x00 }, { DRV2667_WV_SEQ_3, 0x00 }, { DRV2667_WV_SEQ_4, 0x00 }, { DRV2667_WV_SEQ_5, 0x00 }, { DRV2667_WV_SEQ_6, 0x00 }, { DRV2667_WV_SEQ_7, 0x00 }, { DRV2667_FIFO, 0x00 }, { DRV2667_PAGE, 0x00 }, }; static int drv2667_set_waveform_freq(struct drv2667_data *haptics) { unsigned int read_buf; int freq; int error; /* Per the data sheet: * Sinusoid Frequency (Hz) = 7.8125 x Frequency */ freq = (haptics->frequency * 1000) / 78125; if (freq <= 0) { dev_err(&haptics->client->dev, "ERROR: 
Frequency calculated to %i\n", freq); return -EINVAL; } error = regmap_read(haptics->regmap, DRV2667_PAGE, &read_buf); if (error) { dev_err(&haptics->client->dev, "Failed to read the page number: %d\n", error); return -EIO; } if (read_buf == DRV2667_PAGE_0 || haptics->page != read_buf) { error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page); if (error) { dev_err(&haptics->client->dev, "Failed to set the page: %d\n", error); return -EIO; } } error = regmap_write(haptics->regmap, DRV2667_RAM_FREQ, freq); if (error) dev_err(&haptics->client->dev, "Failed to set the frequency: %d\n", error); /* Reset back to original page */ if (read_buf == DRV2667_PAGE_0 || haptics->page != read_buf) { error = regmap_write(haptics->regmap, DRV2667_PAGE, read_buf); if (error) { dev_err(&haptics->client->dev, "Failed to set the page: %d\n", error); return -EIO; } } return error; } static void drv2667_worker(struct work_struct *work) { struct drv2667_data *haptics = container_of(work, struct drv2667_data, work); int error; if (haptics->magnitude) { error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page); if (error) { dev_err(&haptics->client->dev, "Failed to set the page: %d\n", error); return; } error = regmap_write(haptics->regmap, DRV2667_RAM_AMP, haptics->magnitude); if (error) { dev_err(&haptics->client->dev, "Failed to set the amplitude: %d\n", error); return; } error = regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0); if (error) { dev_err(&haptics->client->dev, "Failed to set the page: %d\n", error); return; } error = regmap_write(haptics->regmap, DRV2667_CTRL_2, DRV2667_GO); if (error) { dev_err(&haptics->client->dev, "Failed to set the GO bit: %d\n", error); } } else { error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_GO, 0); if (error) { dev_err(&haptics->client->dev, "Failed to unset the GO bit: %d\n", error); } } } static int drv2667_haptics_play(struct input_dev *input, void *data, struct ff_effect *effect) { struct 
drv2667_data *haptics = input_get_drvdata(input); if (effect->u.rumble.strong_magnitude > 0) haptics->magnitude = effect->u.rumble.strong_magnitude; else if (effect->u.rumble.weak_magnitude > 0) haptics->magnitude = effect->u.rumble.weak_magnitude; else haptics->magnitude = 0; schedule_work(&haptics->work); return 0; } static void drv2667_close(struct input_dev *input) { struct drv2667_data *haptics = input_get_drvdata(input); int error; cancel_work_sync(&haptics->work); error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_STANDBY, 1); if (error) dev_err(&haptics->client->dev, "Failed to enter standby mode: %d\n", error); } static const struct reg_default drv2667_init_regs[] = { { DRV2667_CTRL_2, 0 }, { DRV2667_CTRL_1, DRV2667_25_VPP_GAIN }, { DRV2667_WV_SEQ_0, 1 }, { DRV2667_WV_SEQ_1, 0 } }; static const struct reg_default drv2667_page1_init[] = { { DRV2667_RAM_HDR_SZ, 0x05 }, { DRV2667_RAM_START_HI, 0x80 }, { DRV2667_RAM_START_LO, 0x06 }, { DRV2667_RAM_STOP_HI, 0x00 }, { DRV2667_RAM_STOP_LO, 0x09 }, { DRV2667_RAM_REPEAT_CT, 0 }, { DRV2667_RAM_DURATION, 0x05 }, { DRV2667_RAM_ENVELOPE, DRV2667_NO_ENV }, { DRV2667_RAM_AMP, 0x60 }, }; static int drv2667_init(struct drv2667_data *haptics) { int error; /* Set default haptic frequency to 195Hz on Page 1*/ haptics->frequency = 195; haptics->page = DRV2667_PAGE_1; error = regmap_register_patch(haptics->regmap, drv2667_init_regs, ARRAY_SIZE(drv2667_init_regs)); if (error) { dev_err(&haptics->client->dev, "Failed to write init registers: %d\n", error); return error; } error = regmap_write(haptics->regmap, DRV2667_PAGE, haptics->page); if (error) { dev_err(&haptics->client->dev, "Failed to set page: %d\n", error); goto error_out; } error = drv2667_set_waveform_freq(haptics); if (error) goto error_page; error = regmap_register_patch(haptics->regmap, drv2667_page1_init, ARRAY_SIZE(drv2667_page1_init)); if (error) { dev_err(&haptics->client->dev, "Failed to write page registers: %d\n", error); return error; } 
error = regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0); return error; error_page: regmap_write(haptics->regmap, DRV2667_PAGE, DRV2667_PAGE_0); error_out: return error; } static const struct regmap_config drv2667_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = DRV2667_MAX_REG, .reg_defaults = drv2667_reg_defs, .num_reg_defaults = ARRAY_SIZE(drv2667_reg_defs), .cache_type = REGCACHE_NONE, }; static int drv2667_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct drv2667_data *haptics; int error; haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL); if (!haptics) return -ENOMEM; haptics->regulator = devm_regulator_get(&client->dev, "vbat"); if (IS_ERR(haptics->regulator)) { error = PTR_ERR(haptics->regulator); dev_err(&client->dev, "unable to get regulator, error: %d\n", error); return error; } haptics->input_dev = devm_input_allocate_device(&client->dev); if (!haptics->input_dev) { dev_err(&client->dev, "Failed to allocate input device\n"); return -ENOMEM; } haptics->input_dev->name = "drv2667:haptics"; haptics->input_dev->dev.parent = client->dev.parent; haptics->input_dev->close = drv2667_close; input_set_drvdata(haptics->input_dev, haptics); input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE); error = input_ff_create_memless(haptics->input_dev, NULL, drv2667_haptics_play); if (error) { dev_err(&client->dev, "input_ff_create() failed: %d\n", error); return error; } INIT_WORK(&haptics->work, drv2667_worker); haptics->client = client; i2c_set_clientdata(client, haptics); haptics->regmap = devm_regmap_init_i2c(client, &drv2667_regmap_config); if (IS_ERR(haptics->regmap)) { error = PTR_ERR(haptics->regmap); dev_err(&client->dev, "Failed to allocate register map: %d\n", error); return error; } error = drv2667_init(haptics); if (error) { dev_err(&client->dev, "Device init failed: %d\n", error); return error; } error = input_register_device(haptics->input_dev); if (error) { dev_err(&client->dev, 
"couldn't register input device: %d\n", error); return error; } return 0; } static int __maybe_unused drv2667_suspend(struct device *dev) { struct drv2667_data *haptics = dev_get_drvdata(dev); int ret = 0; mutex_lock(&haptics->input_dev->mutex); if (haptics->input_dev->users) { ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_STANDBY, 1); if (ret) { dev_err(dev, "Failed to set standby mode\n"); regulator_disable(haptics->regulator); goto out; } ret = regulator_disable(haptics->regulator); if (ret) { dev_err(dev, "Failed to disable regulator\n"); regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_STANDBY, 0); } } out: mutex_unlock(&haptics->input_dev->mutex); return ret; } static int __maybe_unused drv2667_resume(struct device *dev) { struct drv2667_data *haptics = dev_get_drvdata(dev); int ret = 0; mutex_lock(&haptics->input_dev->mutex); if (haptics->input_dev->users) { ret = regulator_enable(haptics->regulator); if (ret) { dev_err(dev, "Failed to enable regulator\n"); goto out; } ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2, DRV2667_STANDBY, 0); if (ret) { dev_err(dev, "Failed to unset standby mode\n"); regulator_disable(haptics->regulator); goto out; } } out: mutex_unlock(&haptics->input_dev->mutex); return ret; } static SIMPLE_DEV_PM_OPS(drv2667_pm_ops, drv2667_suspend, drv2667_resume); static const struct i2c_device_id drv2667_id[] = { { "drv2667", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, drv2667_id); #ifdef CONFIG_OF static const struct of_device_id drv2667_of_match[] = { { .compatible = "ti,drv2667", }, { } }; MODULE_DEVICE_TABLE(of, drv2667_of_match); #endif static struct i2c_driver drv2667_driver = { .probe = drv2667_probe, .driver = { .name = "drv2667-haptics", .owner = THIS_MODULE, .of_match_table = of_match_ptr(drv2667_of_match), .pm = &drv2667_pm_ops, }, .id_table = drv2667_id, }; module_i2c_driver(drv2667_driver); MODULE_DESCRIPTION("TI DRV2667 haptics driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Murphy 
<dmurphy@ti.com>");
gpl-2.0
pietrushnic/linux
drivers/mfd/twl-core.c
547
33328
/* * twl_core.c - driver for TWL4030/TWL5030/TWL60X0/TPS659x0 PM * and audio CODEC devices * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * Modifications to defer interrupt handling to a kernel thread: * Copyright (C) 2006 MontaVista Software, Inc. * * Based on tlv320aic23.c: * Copyright (c) by Kai Svahn <kai.svahn@nokia.com> * * Code cleanup and modifications to IRQ handler. * by syed khasim <x0khasim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/device.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/regulator/machine.h> #include <linux/i2c.h> #include <linux/i2c/twl.h> /* Register descriptions for audio */ #include <linux/mfd/twl4030-audio.h> #include "twl-core.h" /* * The TWL4030 "Triton 2" is one of a family of a multi-function "Power * Management and System Companion Device" chips originally designed for * use in OMAP2 and OMAP 3 based systems. Its control interfaces use I2C, * often at around 3 Mbit/sec, including for interrupt handling. 
* * This driver core provides genirq support for the interrupts emitted, * by the various modules, and exports register access primitives. * * FIXME this driver currently requires use of the first interrupt line * (and associated registers). */ #define DRIVER_NAME "twl" /* Triton Core internal information (BEGIN) */ /* Base Address defns for twl4030_map[] */ /* subchip/slave 0 - USB ID */ #define TWL4030_BASEADD_USB 0x0000 /* subchip/slave 1 - AUD ID */ #define TWL4030_BASEADD_AUDIO_VOICE 0x0000 #define TWL4030_BASEADD_GPIO 0x0098 #define TWL4030_BASEADD_INTBR 0x0085 #define TWL4030_BASEADD_PIH 0x0080 #define TWL4030_BASEADD_TEST 0x004C /* subchip/slave 2 - AUX ID */ #define TWL4030_BASEADD_INTERRUPTS 0x00B9 #define TWL4030_BASEADD_LED 0x00EE #define TWL4030_BASEADD_MADC 0x0000 #define TWL4030_BASEADD_MAIN_CHARGE 0x0074 #define TWL4030_BASEADD_PRECHARGE 0x00AA #define TWL4030_BASEADD_PWM 0x00F8 #define TWL4030_BASEADD_KEYPAD 0x00D2 #define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */ #define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's one */ /* subchip/slave 3 - POWER ID */ #define TWL4030_BASEADD_BACKUP 0x0014 #define TWL4030_BASEADD_INT 0x002E #define TWL4030_BASEADD_PM_MASTER 0x0036 #define TWL4030_BASEADD_PM_RECEIVER 0x005B #define TWL4030_DCDC_GLOBAL_CFG 0x06 #define SMARTREFLEX_ENABLE BIT(3) #define TWL4030_BASEADD_RTC 0x001C #define TWL4030_BASEADD_SECURED_REG 0x0000 /* Triton Core internal information (END) */ /* subchip/slave 0 0x48 - POWER */ #define TWL6030_BASEADD_RTC 0x0000 #define TWL6030_BASEADD_SECURED_REG 0x0017 #define TWL6030_BASEADD_PM_MASTER 0x001F #define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */ #define TWL6030_BASEADD_PM_MISC 0x00E2 #define TWL6030_BASEADD_PM_PUPD 0x00F0 /* subchip/slave 1 0x49 - FEATURE */ #define TWL6030_BASEADD_USB 0x0000 #define TWL6030_BASEADD_GPADC_CTRL 0x002E #define TWL6030_BASEADD_AUX 0x0090 #define TWL6030_BASEADD_PWM 0x00BA #define TWL6030_BASEADD_GASGAUGE 0x00C0 
#define TWL6030_BASEADD_PIH 0x00D0 #define TWL6030_BASEADD_CHARGER 0x00E0 #define TWL6032_BASEADD_CHARGER 0x00DA #define TWL6030_BASEADD_LED 0x00F4 /* subchip/slave 2 0x4A - DFT */ #define TWL6030_BASEADD_DIEID 0x00C0 /* subchip/slave 3 0x4B - AUDIO */ #define TWL6030_BASEADD_AUDIO 0x0000 #define TWL6030_BASEADD_RSV 0x0000 #define TWL6030_BASEADD_ZERO 0x0000 /* Few power values */ #define R_CFG_BOOT 0x05 /* some fields in R_CFG_BOOT */ #define HFCLK_FREQ_19p2_MHZ (1 << 0) #define HFCLK_FREQ_26_MHZ (2 << 0) #define HFCLK_FREQ_38p4_MHZ (3 << 0) #define HIGH_PERF_SQ (1 << 3) #define CK32K_LOWPWR_EN (1 << 7) /*----------------------------------------------------------------------*/ /* Structure for each TWL4030/TWL6030 Slave */ struct twl_client { struct i2c_client *client; struct regmap *regmap; }; /* mapping the module id to slave id and base address */ struct twl_mapping { unsigned char sid; /* Slave ID */ unsigned char base; /* base address */ }; struct twl_private { bool ready; /* The core driver is ready to be used */ u32 twl_idcode; /* TWL IDCODE Register value */ unsigned int twl_id; struct twl_mapping *twl_map; struct twl_client *twl_modules; }; static struct twl_private *twl_priv; static struct twl_mapping twl4030_map[] = { /* * NOTE: don't change this table without updating the * <linux/i2c/twl.h> defines for TWL4030_MODULE_* * so they continue to match the order in this table. 
*/ /* Common IPs */ { 0, TWL4030_BASEADD_USB }, { 1, TWL4030_BASEADD_PIH }, { 2, TWL4030_BASEADD_MAIN_CHARGE }, { 3, TWL4030_BASEADD_PM_MASTER }, { 3, TWL4030_BASEADD_PM_RECEIVER }, { 3, TWL4030_BASEADD_RTC }, { 2, TWL4030_BASEADD_PWM }, { 2, TWL4030_BASEADD_LED }, { 3, TWL4030_BASEADD_SECURED_REG }, /* TWL4030 specific IPs */ { 1, TWL4030_BASEADD_AUDIO_VOICE }, { 1, TWL4030_BASEADD_GPIO }, { 1, TWL4030_BASEADD_INTBR }, { 1, TWL4030_BASEADD_TEST }, { 2, TWL4030_BASEADD_KEYPAD }, { 2, TWL4030_BASEADD_MADC }, { 2, TWL4030_BASEADD_INTERRUPTS }, { 2, TWL4030_BASEADD_PRECHARGE }, { 3, TWL4030_BASEADD_BACKUP }, { 3, TWL4030_BASEADD_INT }, { 2, TWL5031_BASEADD_ACCESSORY }, { 2, TWL5031_BASEADD_INTERRUPTS }, }; static struct reg_default twl4030_49_defaults[] = { /* Audio Registers */ { 0x01, 0x00}, /* CODEC_MODE */ { 0x02, 0x00}, /* OPTION */ /* 0x03 Unused */ { 0x04, 0x00}, /* MICBIAS_CTL */ { 0x05, 0x00}, /* ANAMICL */ { 0x06, 0x00}, /* ANAMICR */ { 0x07, 0x00}, /* AVADC_CTL */ { 0x08, 0x00}, /* ADCMICSEL */ { 0x09, 0x00}, /* DIGMIXING */ { 0x0a, 0x0f}, /* ATXL1PGA */ { 0x0b, 0x0f}, /* ATXR1PGA */ { 0x0c, 0x0f}, /* AVTXL2PGA */ { 0x0d, 0x0f}, /* AVTXR2PGA */ { 0x0e, 0x00}, /* AUDIO_IF */ { 0x0f, 0x00}, /* VOICE_IF */ { 0x10, 0x3f}, /* ARXR1PGA */ { 0x11, 0x3f}, /* ARXL1PGA */ { 0x12, 0x3f}, /* ARXR2PGA */ { 0x13, 0x3f}, /* ARXL2PGA */ { 0x14, 0x25}, /* VRXPGA */ { 0x15, 0x00}, /* VSTPGA */ { 0x16, 0x00}, /* VRX2ARXPGA */ { 0x17, 0x00}, /* AVDAC_CTL */ { 0x18, 0x00}, /* ARX2VTXPGA */ { 0x19, 0x32}, /* ARXL1_APGA_CTL*/ { 0x1a, 0x32}, /* ARXR1_APGA_CTL*/ { 0x1b, 0x32}, /* ARXL2_APGA_CTL*/ { 0x1c, 0x32}, /* ARXR2_APGA_CTL*/ { 0x1d, 0x00}, /* ATX2ARXPGA */ { 0x1e, 0x00}, /* BT_IF */ { 0x1f, 0x55}, /* BTPGA */ { 0x20, 0x00}, /* BTSTPGA */ { 0x21, 0x00}, /* EAR_CTL */ { 0x22, 0x00}, /* HS_SEL */ { 0x23, 0x00}, /* HS_GAIN_SET */ { 0x24, 0x00}, /* HS_POPN_SET */ { 0x25, 0x00}, /* PREDL_CTL */ { 0x26, 0x00}, /* PREDR_CTL */ { 0x27, 0x00}, /* PRECKL_CTL */ { 0x28, 0x00}, /* 
PRECKR_CTL */ { 0x29, 0x00}, /* HFL_CTL */ { 0x2a, 0x00}, /* HFR_CTL */ { 0x2b, 0x05}, /* ALC_CTL */ { 0x2c, 0x00}, /* ALC_SET1 */ { 0x2d, 0x00}, /* ALC_SET2 */ { 0x2e, 0x00}, /* BOOST_CTL */ { 0x2f, 0x00}, /* SOFTVOL_CTL */ { 0x30, 0x13}, /* DTMF_FREQSEL */ { 0x31, 0x00}, /* DTMF_TONEXT1H */ { 0x32, 0x00}, /* DTMF_TONEXT1L */ { 0x33, 0x00}, /* DTMF_TONEXT2H */ { 0x34, 0x00}, /* DTMF_TONEXT2L */ { 0x35, 0x79}, /* DTMF_TONOFF */ { 0x36, 0x11}, /* DTMF_WANONOFF */ { 0x37, 0x00}, /* I2S_RX_SCRAMBLE_H */ { 0x38, 0x00}, /* I2S_RX_SCRAMBLE_M */ { 0x39, 0x00}, /* I2S_RX_SCRAMBLE_L */ { 0x3a, 0x06}, /* APLL_CTL */ { 0x3b, 0x00}, /* DTMF_CTL */ { 0x3c, 0x44}, /* DTMF_PGA_CTL2 (0x3C) */ { 0x3d, 0x69}, /* DTMF_PGA_CTL1 (0x3D) */ { 0x3e, 0x00}, /* MISC_SET_1 */ { 0x3f, 0x00}, /* PCMBTMUX */ /* 0x40 - 0x42 Unused */ { 0x43, 0x00}, /* RX_PATH_SEL */ { 0x44, 0x32}, /* VDL_APGA_CTL */ { 0x45, 0x00}, /* VIBRA_CTL */ { 0x46, 0x00}, /* VIBRA_SET */ { 0x47, 0x00}, /* VIBRA_PWM_SET */ { 0x48, 0x00}, /* ANAMIC_GAIN */ { 0x49, 0x00}, /* MISC_SET_2 */ /* End of Audio Registers */ }; static bool twl4030_49_nop_reg(struct device *dev, unsigned int reg) { switch (reg) { case 0x00: case 0x03: case 0x40: case 0x41: case 0x42: return false; default: return true; } } static const struct regmap_range twl4030_49_volatile_ranges[] = { regmap_reg_range(TWL4030_BASEADD_TEST, 0xff), }; static const struct regmap_access_table twl4030_49_volatile_table = { .yes_ranges = twl4030_49_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges), }; static struct regmap_config twl4030_regmap_config[4] = { { /* Address 0x48 */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, { /* Address 0x49 */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, .readable_reg = twl4030_49_nop_reg, .writeable_reg = twl4030_49_nop_reg, .volatile_table = &twl4030_49_volatile_table, .reg_defaults = twl4030_49_defaults, .num_reg_defaults = ARRAY_SIZE(twl4030_49_defaults), .cache_type = REGCACHE_RBTREE, }, { /* 
Address 0x4a */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, { /* Address 0x4b */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, }; static struct twl_mapping twl6030_map[] = { /* * NOTE: don't change this table without updating the * <linux/i2c/twl.h> defines for TWL4030_MODULE_* * so they continue to match the order in this table. */ /* Common IPs */ { 1, TWL6030_BASEADD_USB }, { 1, TWL6030_BASEADD_PIH }, { 1, TWL6030_BASEADD_CHARGER }, { 0, TWL6030_BASEADD_PM_MASTER }, { 0, TWL6030_BASEADD_PM_SLAVE_MISC }, { 0, TWL6030_BASEADD_RTC }, { 1, TWL6030_BASEADD_PWM }, { 1, TWL6030_BASEADD_LED }, { 0, TWL6030_BASEADD_SECURED_REG }, /* TWL6030 specific IPs */ { 0, TWL6030_BASEADD_ZERO }, { 1, TWL6030_BASEADD_ZERO }, { 2, TWL6030_BASEADD_ZERO }, { 1, TWL6030_BASEADD_GPADC_CTRL }, { 1, TWL6030_BASEADD_GASGAUGE }, }; static struct regmap_config twl6030_regmap_config[3] = { { /* Address 0x48 */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, { /* Address 0x49 */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, { /* Address 0x4a */ .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }, }; /*----------------------------------------------------------------------*/ static inline int twl_get_num_slaves(void) { if (twl_class_is_4030()) return 4; /* TWL4030 class have four slave address */ else return 3; /* TWL6030 class have three slave address */ } static inline int twl_get_last_module(void) { if (twl_class_is_4030()) return TWL4030_MODULE_LAST; else return TWL6030_MODULE_LAST; } /* Exported Functions */ unsigned int twl_rev(void) { return twl_priv ? twl_priv->twl_id : 0; } EXPORT_SYMBOL(twl_rev); /** * twl_get_regmap - Get the regmap associated with the given module * @mod_no: module number * * Returns the regmap pointer or NULL in case of failure. 
*/ static struct regmap *twl_get_regmap(u8 mod_no) { int sid; struct twl_client *twl; if (unlikely(!twl_priv || !twl_priv->ready)) { pr_err("%s: not initialized\n", DRIVER_NAME); return NULL; } if (unlikely(mod_no >= twl_get_last_module())) { pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return NULL; } sid = twl_priv->twl_map[mod_no].sid; twl = &twl_priv->twl_modules[sid]; return twl->regmap; } /** * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes+1 containing data to write * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * * Returns the result of operation - 0 is success */ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { struct regmap *regmap = twl_get_regmap(mod_no); int ret; if (!regmap) return -EPERM; ret = regmap_bulk_write(regmap, twl_priv->twl_map[mod_no].base + reg, value, num_bytes); if (ret) pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n", DRIVER_NAME, mod_no, reg, num_bytes); return ret; } EXPORT_SYMBOL(twl_i2c_write); /** * twl_i2c_read - Reads a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes containing data to be read * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * * Returns result of operation - num_bytes is success else failure. 
*/ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { struct regmap *regmap = twl_get_regmap(mod_no); int ret; if (!regmap) return -EPERM; ret = regmap_bulk_read(regmap, twl_priv->twl_map[mod_no].base + reg, value, num_bytes); if (ret) pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n", DRIVER_NAME, mod_no, reg, num_bytes); return ret; } EXPORT_SYMBOL(twl_i2c_read); /** * twl_regcache_bypass - Configure the regcache bypass for the regmap associated * with the module * @mod_no: module number * @enable: Regcache bypass state * * Returns 0 else failure. */ int twl_set_regcache_bypass(u8 mod_no, bool enable) { struct regmap *regmap = twl_get_regmap(mod_no); if (!regmap) return -EPERM; regcache_cache_bypass(regmap, enable); return 0; } EXPORT_SYMBOL(twl_set_regcache_bypass); /*----------------------------------------------------------------------*/ /** * twl_read_idcode_register - API to read the IDCODE register. * * Unlocks the IDCODE register and read the 32 bit value. */ static int twl_read_idcode_register(void) { int err; err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, TWL_EEPROM_R_UNLOCK, REG_UNLOCK_TEST_REG); if (err) { pr_err("TWL4030 Unable to unlock IDCODE registers -%d\n", err); goto fail; } err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_priv->twl_idcode), REG_IDCODE_7_0, 4); if (err) { pr_err("TWL4030: unable to read IDCODE -%d\n", err); goto fail; } err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, 0x0, REG_UNLOCK_TEST_REG); if (err) pr_err("TWL4030 Unable to relock IDCODE registers -%d\n", err); fail: return err; } /** * twl_get_type - API to get TWL Si type. * * Api to get the TWL Si type from IDCODE value. */ int twl_get_type(void) { return TWL_SIL_TYPE(twl_priv->twl_idcode); } EXPORT_SYMBOL_GPL(twl_get_type); /** * twl_get_version - API to get TWL Si version. * * Api to get the TWL Si version from IDCODE value. 
*/ int twl_get_version(void) { return TWL_SIL_REV(twl_priv->twl_idcode); } EXPORT_SYMBOL_GPL(twl_get_version); /** * twl_get_hfclk_rate - API to get TWL external HFCLK clock rate. * * Api to get the TWL HFCLK rate based on BOOT_CFG register. */ int twl_get_hfclk_rate(void) { u8 ctrl; int rate; twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &ctrl, R_CFG_BOOT); switch (ctrl & 0x3) { case HFCLK_FREQ_19p2_MHZ: rate = 19200000; break; case HFCLK_FREQ_26_MHZ: rate = 26000000; break; case HFCLK_FREQ_38p4_MHZ: rate = 38400000; break; default: pr_err("TWL4030: HFCLK is not configured\n"); rate = -EINVAL; break; } return rate; } EXPORT_SYMBOL_GPL(twl_get_hfclk_rate); static struct device * add_numbered_child(unsigned mod_no, const char *name, int num, void *pdata, unsigned pdata_len, bool can_wakeup, int irq0, int irq1) { struct platform_device *pdev; struct twl_client *twl; int status, sid; if (unlikely(mod_no >= twl_get_last_module())) { pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return ERR_PTR(-EPERM); } sid = twl_priv->twl_map[mod_no].sid; twl = &twl_priv->twl_modules[sid]; pdev = platform_device_alloc(name, num); if (!pdev) { dev_dbg(&twl->client->dev, "can't alloc dev\n"); status = -ENOMEM; goto err; } pdev->dev.parent = &twl->client->dev; if (pdata) { status = platform_device_add_data(pdev, pdata, pdata_len); if (status < 0) { dev_dbg(&pdev->dev, "can't add platform_data\n"); goto err; } } if (irq0) { struct resource r[2] = { { .start = irq0, .flags = IORESOURCE_IRQ, }, { .start = irq1, .flags = IORESOURCE_IRQ, }, }; status = platform_device_add_resources(pdev, r, irq1 ? 
2 : 1); if (status < 0) { dev_dbg(&pdev->dev, "can't add irqs\n"); goto err; } } status = platform_device_add(pdev); if (status == 0) device_init_wakeup(&pdev->dev, can_wakeup); err: if (status < 0) { platform_device_put(pdev); dev_err(&twl->client->dev, "can't add %s dev\n", name); return ERR_PTR(status); } return &pdev->dev; } static inline struct device *add_child(unsigned mod_no, const char *name, void *pdata, unsigned pdata_len, bool can_wakeup, int irq0, int irq1) { return add_numbered_child(mod_no, name, -1, pdata, pdata_len, can_wakeup, irq0, irq1); } static struct device * add_regulator_linked(int num, struct regulator_init_data *pdata, struct regulator_consumer_supply *consumers, unsigned num_consumers, unsigned long features) { struct twl_regulator_driver_data drv_data; /* regulator framework demands init_data ... */ if (!pdata) return NULL; if (consumers) { pdata->consumer_supplies = consumers; pdata->num_consumer_supplies = num_consumers; } if (pdata->driver_data) { /* If we have existing drv_data, just add the flags */ struct twl_regulator_driver_data *tmp; tmp = pdata->driver_data; tmp->features |= features; } else { /* add new driver data struct, used only during init */ drv_data.features = features; drv_data.set_voltage = NULL; drv_data.get_voltage = NULL; drv_data.data = NULL; pdata->driver_data = &drv_data; } /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ return add_numbered_child(TWL_MODULE_PM_MASTER, "twl_reg", num, pdata, sizeof(*pdata), false, 0, 0); } static struct device * add_regulator(int num, struct regulator_init_data *pdata, unsigned long features) { return add_regulator_linked(num, pdata, NULL, 0, features); } /* * NOTE: We know the first 8 IRQs after pdata->base_irq are * for the PIH, and the next are for the PWR_INT SIH, since * that's how twl_init_irq() sets things up. 
*/ static int add_children(struct twl4030_platform_data *pdata, unsigned irq_base, unsigned long features) { struct device *child; if (IS_ENABLED(CONFIG_GPIO_TWL4030) && pdata->gpio) { child = add_child(TWL4030_MODULE_GPIO, "twl4030_gpio", pdata->gpio, sizeof(*pdata->gpio), false, irq_base + GPIO_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_KEYBOARD_TWL4030) && pdata->keypad) { child = add_child(TWL4030_MODULE_KEYPAD, "twl4030_keypad", pdata->keypad, sizeof(*pdata->keypad), true, irq_base + KEYPAD_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc && twl_class_is_4030()) { child = add_child(TWL4030_MODULE_MADC, "twl4030_madc", pdata->madc, sizeof(*pdata->madc), true, irq_base + MADC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_RTC_DRV_TWL4030)) { /* * REVISIT platform_data here currently might expose the * "msecure" line ... but for now we just expect board * setup to tell the chip "it's always ok to SET_TIME". * Eventually, Linux might become more aware of such * HW security concerns, and "least privilege". 
*/ child = add_child(TWL_MODULE_RTC, "twl_rtc", NULL, 0, true, irq_base + RTC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_PWM_TWL)) { child = add_child(TWL_MODULE_PWM, "twl-pwm", NULL, 0, false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_PWM_TWL_LED)) { child = add_child(TWL_MODULE_LED, "twl-pwmled", NULL, 0, false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_TWL4030_USB) && pdata->usb && twl_class_is_4030()) { static struct regulator_consumer_supply usb1v5 = { .supply = "usb1v5", }; static struct regulator_consumer_supply usb1v8 = { .supply = "usb1v8", }; static struct regulator_consumer_supply usb3v1[] = { { .supply = "usb3v1" }, { .supply = "bci3v1" }, }; /* First add the regulators so that they can be used by transceiver */ if (IS_ENABLED(CONFIG_REGULATOR_TWL4030)) { /* this is a template that gets copied */ struct regulator_init_data usb_fixed = { .constraints.valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .constraints.valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }; child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed, &usb1v5, 1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed, &usb1v8, 1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed, usb3v1, 2, features); if (IS_ERR(child)) return PTR_ERR(child); } child = add_child(TWL_MODULE_USB, "twl4030_usb", pdata->usb, sizeof(*pdata->usb), true, /* irq0 = USB_PRES, irq1 = USB */ irq_base + USB_PRES_INTR_OFFSET, irq_base + USB_INTR_OFFSET); if (IS_ERR(child)) return PTR_ERR(child); /* we need to connect regulators to this transceiver */ if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) { usb1v5.dev_name = dev_name(child); usb1v8.dev_name = dev_name(child); usb3v1[0].dev_name = dev_name(child); } } if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && 
twl_class_is_4030()) { child = add_child(TWL_MODULE_PM_RECEIVER, "twl4030_wdt", NULL, 0, false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) { child = add_child(TWL_MODULE_PM_MASTER, "twl4030_pwrbutton", NULL, 0, true, irq_base + 8 + 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio && twl_class_is_4030()) { child = add_child(TWL4030_MODULE_AUDIO_VOICE, "twl4030-audio", pdata->audio, sizeof(*pdata->audio), false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } /* twl4030 regulators */ if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VIO, pdata->vio, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDAC, pdata->vdac, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator((features & TWL4030_VAUX2) ? 
TWL4030_REG_VAUX2_4030 : TWL4030_REG_VAUX2, pdata->vaux2, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig, features); if (IS_ERR(child)) return PTR_ERR(child); } /* maybe add LDOs that are omitted on cost-reduced parts */ if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && !(features & TPS_SUBSET) && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VSIM, pdata->vsim, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3, features); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4, features); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci && !(features & (TPS_SUBSET | TWL5031))) { child = add_child(TWL_MODULE_MAIN_CHARGE, "twl4030_bci", pdata->bci, sizeof(*pdata->bci), false, /* irq0 = CHG_PRES, irq1 = BCI */ irq_base + BCI_PRES_INTR_OFFSET, irq_base + BCI_INTR_OFFSET); if (IS_ERR(child)) return PTR_ERR(child); } if (IS_ENABLED(CONFIG_TWL4030_POWER) && pdata->power) { child = add_child(TWL_MODULE_PM_MASTER, "twl4030_power", pdata->power, sizeof(*pdata->power), false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } return 0; } /*----------------------------------------------------------------------*/ /* * These three functions initialize the on-chip clock framework, * letting it generate the right frequencies for 
USB, MADC, and * other purposes. */ static inline int __init protect_pm_master(void) { int e = 0; e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0, TWL4030_PM_MASTER_PROTECT_KEY); return e; } static inline int __init unprotect_pm_master(void) { int e = 0; e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1, TWL4030_PM_MASTER_PROTECT_KEY); e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2, TWL4030_PM_MASTER_PROTECT_KEY); return e; } static void clocks_init(struct device *dev, struct twl4030_clock_init_data *clock) { int e = 0; struct clk *osc; u32 rate; u8 ctrl = HFCLK_FREQ_26_MHZ; osc = clk_get(dev, "fck"); if (IS_ERR(osc)) { printk(KERN_WARNING "Skipping twl internal clock init and " "using bootloader value (unknown osc rate)\n"); return; } rate = clk_get_rate(osc); clk_put(osc); switch (rate) { case 19200000: ctrl = HFCLK_FREQ_19p2_MHZ; break; case 26000000: ctrl = HFCLK_FREQ_26_MHZ; break; case 38400000: ctrl = HFCLK_FREQ_38p4_MHZ; break; } ctrl |= HIGH_PERF_SQ; if (clock && clock->ck32k_lowpwr_enable) ctrl |= CK32K_LOWPWR_EN; e |= unprotect_pm_master(); /* effect->MADC+USB ck en */ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); e |= protect_pm_master(); if (e < 0) pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e); } /*----------------------------------------------------------------------*/ static int twl_remove(struct i2c_client *client) { unsigned i, num_slaves; int status; if (twl_class_is_4030()) status = twl4030_exit_irq(); else status = twl6030_exit_irq(); if (status < 0) return status; num_slaves = twl_get_num_slaves(); for (i = 0; i < num_slaves; i++) { struct twl_client *twl = &twl_priv->twl_modules[i]; if (twl->client && twl->client != client) i2c_unregister_device(twl->client); twl->client = NULL; } twl_priv->ready = false; return 0; } static struct of_dev_auxdata twl_auxdata_lookup[] = { OF_DEV_AUXDATA("ti,twl4030-gpio", 0, "twl4030-gpio", NULL), { /* sentinel */ }, }; /* NOTE: This driver only 
handles a single twl4030/tps659x0 chip */ static int twl_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *node = client->dev.of_node; struct platform_device *pdev; struct regmap_config *twl_regmap_config; int irq_base = 0; int status; unsigned i, num_slaves; if (!node && !pdata) { dev_err(&client->dev, "no platform data\n"); return -EINVAL; } if (twl_priv) { dev_dbg(&client->dev, "only one instance of %s allowed\n", DRIVER_NAME); return -EBUSY; } pdev = platform_device_alloc(DRIVER_NAME, -1); if (!pdev) { dev_err(&client->dev, "can't alloc pdev\n"); return -ENOMEM; } status = platform_device_add(pdev); if (status) { platform_device_put(pdev); return status; } if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { dev_dbg(&client->dev, "can't talk I2C?\n"); status = -EIO; goto free; } twl_priv = devm_kzalloc(&client->dev, sizeof(struct twl_private), GFP_KERNEL); if (!twl_priv) { status = -ENOMEM; goto free; } if ((id->driver_data) & TWL6030_CLASS) { twl_priv->twl_id = TWL6030_CLASS_ID; twl_priv->twl_map = &twl6030_map[0]; /* The charger base address is different in twl6032 */ if ((id->driver_data) & TWL6032_SUBCLASS) twl_priv->twl_map[TWL_MODULE_MAIN_CHARGE].base = TWL6032_BASEADD_CHARGER; twl_regmap_config = twl6030_regmap_config; } else { twl_priv->twl_id = TWL4030_CLASS_ID; twl_priv->twl_map = &twl4030_map[0]; twl_regmap_config = twl4030_regmap_config; } num_slaves = twl_get_num_slaves(); twl_priv->twl_modules = devm_kzalloc(&client->dev, sizeof(struct twl_client) * num_slaves, GFP_KERNEL); if (!twl_priv->twl_modules) { status = -ENOMEM; goto free; } for (i = 0; i < num_slaves; i++) { struct twl_client *twl = &twl_priv->twl_modules[i]; if (i == 0) { twl->client = client; } else { twl->client = i2c_new_dummy(client->adapter, client->addr + i); if (!twl->client) { dev_err(&client->dev, "can't attach client %d\n", i); status = -ENOMEM; goto fail; } 
} twl->regmap = devm_regmap_init_i2c(twl->client, &twl_regmap_config[i]); if (IS_ERR(twl->regmap)) { status = PTR_ERR(twl->regmap); dev_err(&client->dev, "Failed to allocate regmap %d, err: %d\n", i, status); goto fail; } } twl_priv->ready = true; /* setup clock framework */ clocks_init(&pdev->dev, pdata ? pdata->clock : NULL); /* read TWL IDCODE Register */ if (twl_class_is_4030()) { status = twl_read_idcode_register(); WARN(status < 0, "Error: reading twl_idcode register value\n"); } /* Maybe init the T2 Interrupt subsystem */ if (client->irq) { if (twl_class_is_4030()) { twl4030_init_chip_irq(id->name); irq_base = twl4030_init_irq(&client->dev, client->irq); } else { irq_base = twl6030_init_irq(&client->dev, client->irq); } if (irq_base < 0) { status = irq_base; goto fail; } } /* * Disable TWL4030/TWL5030 I2C Pull-up on I2C1 and I2C4(SR) interface. * Program I2C_SCL_CTRL_PU(bit 0)=0, I2C_SDA_CTRL_PU (bit 2)=0, * SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0. * * Also, always enable SmartReflex bit as that's needed for omaps to * to do anything over I2C4 for voltage scaling even if SmartReflex * is disabled. Without the SmartReflex bit omap sys_clkreq idle * signal will never trigger for retention idle. 
*/ if (twl_class_is_4030()) { u8 temp; twl_i2c_read_u8(TWL4030_MODULE_INTBR, &temp, REG_GPPUPDCTR1); temp &= ~(SR_I2C_SDA_CTRL_PU | SR_I2C_SCL_CTRL_PU | \ I2C_SDA_CTRL_PU | I2C_SCL_CTRL_PU); twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1); twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &temp, TWL4030_DCDC_GLOBAL_CFG); temp |= SMARTREFLEX_ENABLE; twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, temp, TWL4030_DCDC_GLOBAL_CFG); } if (node) { if (pdata) twl_auxdata_lookup[0].platform_data = pdata->gpio; status = of_platform_populate(node, NULL, twl_auxdata_lookup, &client->dev); } else { status = add_children(pdata, irq_base, id->driver_data); } fail: if (status < 0) twl_remove(client); free: if (status < 0) platform_device_unregister(pdev); return status; } static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ { "twl5031", TWL5031 }, /* TWL5030 updated */ { "tps65950", 0 }, /* catalog version of twl5030 */ { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ { "tps65921", TPS_SUBSET }, /* fewer LDOs; no codec, no LED and vibrator. Charger in USB module*/ { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ { "twl6032", TWL6030_CLASS | TWL6032_SUBCLASS }, /* "Phoenix lite" */ { /* end of list */ }, }; MODULE_DEVICE_TABLE(i2c, twl_ids); /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove, }; module_i2c_driver(twl_driver); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_DESCRIPTION("I2C Core interface for TWL"); MODULE_LICENSE("GPL");
gpl-2.0
tjstyle/android_kernel_fih_msm7x30
drivers/scsi/bnx2i/bnx2i_iscsi.c
803
56557
/* * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. * * Copyright (c) 2006 - 2009 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) */ #include <linux/slab.h> #include <scsi/scsi_tcq.h> #include <scsi/libiscsi.h> #include "bnx2i.h" struct scsi_transport_template *bnx2i_scsi_xport_template; struct iscsi_transport bnx2i_iscsi_transport; static struct scsi_host_template bnx2i_host_template; /* * Global endpoint resource info */ static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ static int bnx2i_adapter_ready(struct bnx2i_hba *hba) { int retval = 0; if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) retval = -EPERM; return retval; } /** * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks * @cmd: iscsi cmd struct pointer * @buf_off: absolute buffer offset * @start_bd_off: u32 pointer to return the offset within the BD * indicated by 'start_bd_idx' on which 'buf_off' falls * @start_bd_idx: index of the BD on which 'buf_off' falls * * identifies & marks various bd info for scsi command's imm data, * unsolicited data and the first solicited data seq. 
*/ static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, u32 *start_bd_off, u32 *start_bd_idx) { struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; u32 cur_offset = 0; u32 cur_bd_idx = 0; if (buf_off) { while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { cur_offset += bd_tbl->buffer_length; cur_bd_idx++; bd_tbl++; } } *start_bd_off = buf_off - cur_offset; *start_bd_idx = cur_bd_idx; } /** * bnx2i_setup_write_cmd_bd_info - sets up BD various information * @task: transport layer's cmd struct pointer * * identifies & marks various bd info for scsi command's immediate data, * unsolicited data and first solicited data seq which includes BD start * index & BD buf off. his function takes into account iscsi parameter such * as immediate data and unsolicited data is support on this connection. */ static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) { struct bnx2i_cmd *cmd = task->dd_data; u32 start_bd_offset; u32 start_bd_idx; u32 buffer_offset = 0; u32 cmd_len = cmd->req.total_data_transfer_length; /* if ImmediateData is turned off & IntialR2T is turned on, * there will be no immediate or unsolicited data, just return. 
*/ if (!iscsi_task_has_unsol_data(task) && !task->imm_count) return; /* Immediate data */ buffer_offset += task->imm_count; if (task->imm_count == cmd_len) return; if (iscsi_task_has_unsol_data(task)) { bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, &start_bd_offset, &start_bd_idx); cmd->req.ud_buffer_offset = start_bd_offset; cmd->req.ud_start_bd_index = start_bd_idx; buffer_offset += task->unsol_r2t.data_length; } if (buffer_offset != cmd_len) { bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, &start_bd_offset, &start_bd_idx); if ((start_bd_offset > task->conn->session->first_burst) || (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { int i = 0; iscsi_conn_printk(KERN_ALERT, task->conn, "bnx2i- error, buf offset 0x%x " "bd_valid %d use_sg %d\n", buffer_offset, cmd->io_tbl.bd_valid, scsi_sg_count(cmd->scsi_cmd)); for (i = 0; i < cmd->io_tbl.bd_valid; i++) iscsi_conn_printk(KERN_ALERT, task->conn, "bnx2i err, bd[%d]: len %x\n", i, cmd->io_tbl.bd_tbl[i].\ buffer_length); } cmd->req.sd_buffer_offset = start_bd_offset; cmd->req.sd_start_bd_index = start_bd_idx; } } /** * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table * @hba: adapter instance * @cmd: iscsi cmd struct pointer * * map SG list */ static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; struct scatterlist *sg; int byte_count = 0; int bd_count = 0; int sg_count; int sg_len; u64 addr; int i; BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); sg_count = scsi_dma_map(sc); scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = sg_dma_len(sg); addr = (u64) sg_dma_address(sg); bd[bd_count].buffer_addr_lo = addr & 0xffffffff; bd[bd_count].buffer_addr_hi = addr >> 32; bd[bd_count].buffer_length = sg_len; bd[bd_count].flags = 0; if (bd_count == 0) bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; byte_count += sg_len; bd_count++; } if (bd_count) bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; 
BUG_ON(byte_count != scsi_bufflen(sc)); return bd_count; } /** * bnx2i_iscsi_map_sg_list - maps SG list * @cmd: iscsi cmd struct pointer * * creates BD list table for the command */ static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) { int bd_count; bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); if (!bd_count) { struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; bd[0].buffer_length = bd[0].flags = 0; } cmd->io_tbl.bd_valid = bd_count; } /** * bnx2i_iscsi_unmap_sg_list - unmaps SG list * @cmd: iscsi cmd struct pointer * * unmap IO buffers and invalidate the BD table */ void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) { struct scsi_cmnd *sc = cmd->scsi_cmd; if (cmd->io_tbl.bd_valid && sc) { scsi_dma_unmap(sc); cmd->io_tbl.bd_valid = 0; } } static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) { memset(&cmd->req, 0x00, sizeof(cmd->req)); cmd->req.op_code = 0xFF; cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; cmd->req.bd_list_addr_hi = (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); } /** * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' * @hba: pointer to adapter instance * @conn: pointer to iscsi connection * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) * * update iscsi cid table entry with connection pointer. 
This enables * driver to quickly get hold of connection structure pointer in * completion/interrupt thread using iscsi context ID */ static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn, u32 iscsi_cid) { if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "conn bind - entry #%d not free\n", iscsi_cid); return -EBUSY; } hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; return 0; } /** * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) */ struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, u16 iscsi_cid) { if (!hba->cid_que.conn_cid_tbl) { printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); return NULL; } else if (iscsi_cid >= hba->max_active_conns) { printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); return NULL; } return hba->cid_que.conn_cid_tbl[iscsi_cid]; } /** * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool * @hba: pointer to adapter instance */ static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) { int idx; if (!hba->cid_que.cid_free_cnt) return -1; idx = hba->cid_que.cid_q_cons_idx; hba->cid_que.cid_q_cons_idx++; if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_free_cnt--; return hba->cid_que.cid_que[idx]; } /** * bnx2i_free_iscsi_cid - returns tcp port to free list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to free */ static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) { int idx; if (iscsi_cid == (u16) -1) return; hba->cid_que.cid_free_cnt++; idx = hba->cid_que.cid_q_prod_idx; hba->cid_que.cid_que[idx] = iscsi_cid; hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; hba->cid_que.cid_q_prod_idx++; if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) hba->cid_que.cid_q_prod_idx = 0; } /** * 
bnx2i_setup_free_cid_que - sets up free iscsi cid queue * @hba: pointer to adapter instance * * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, * and initialize table attributes */ static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) { int mem_size; int i; mem_size = hba->max_active_conns * sizeof(u32); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.cid_que_base) return -ENOMEM; mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); if (!hba->cid_que.conn_cid_tbl) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; return -ENOMEM; } hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; hba->cid_que.cid_q_prod_idx = 0; hba->cid_que.cid_q_cons_idx = 0; hba->cid_que.cid_q_max_idx = hba->max_active_conns; hba->cid_que.cid_free_cnt = hba->max_active_conns; for (i = 0; i < hba->max_active_conns; i++) { hba->cid_que.cid_que[i] = i; hba->cid_que.conn_cid_tbl[i] = NULL; } return 0; } /** * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources * @hba: pointer to adapter instance */ static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba) { kfree(hba->cid_que.cid_que_base); hba->cid_que.cid_que_base = NULL; kfree(hba->cid_que.conn_cid_tbl); hba->cid_que.conn_cid_tbl = NULL; } /** * bnx2i_alloc_ep - allocates ep structure from global pool * @hba: pointer to adapter instance * * routine allocates a free endpoint structure from global pool and * a tcp port to be used for this connection. 
Global resource lock, * 'bnx2i_resc_lock' is held while accessing shared global data structures */ static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) { struct iscsi_endpoint *ep; struct bnx2i_endpoint *bnx2i_ep; ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); if (!ep) { printk(KERN_ERR "bnx2i: Could not allocate ep\n"); return NULL; } bnx2i_ep = ep->dd_data; INIT_LIST_HEAD(&bnx2i_ep->link); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->ep_iscsi_cid = (u16) -1; bnx2i_ep->hba = hba; bnx2i_ep->hba_age = hba->age; hba->ofld_conns_active++; init_waitqueue_head(&bnx2i_ep->ofld_wait); return ep; } /** * bnx2i_free_ep - free endpoint * @ep: pointer to iscsi endpoint structure */ static void bnx2i_free_ep(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; unsigned long flags; spin_lock_irqsave(&bnx2i_resc_lock, flags); bnx2i_ep->state = EP_STATE_IDLE; bnx2i_ep->hba->ofld_conns_active--; bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); if (bnx2i_ep->conn) { bnx2i_ep->conn->ep = NULL; bnx2i_ep->conn = NULL; } bnx2i_ep->hba = NULL; spin_unlock_irqrestore(&bnx2i_resc_lock, flags); iscsi_destroy_endpoint(ep); } /** * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command * @hba: adapter instance pointer * @session: iscsi session pointer * @cmd: iscsi command structure */ static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, struct bnx2i_cmd *cmd) { struct io_bdt *io = &cmd->io_tbl; struct iscsi_bd *bd; io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), &io->bd_tbl_dma, GFP_KERNEL); if (!io->bd_tbl) { iscsi_session_printk(KERN_ERR, session, "Could not " "allocate bdt.\n"); return -ENOMEM; } io->bd_valid = 0; return 0; } /** * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table * @hba: adapter instance pointer * @session: iscsi session pointer * @cmd: iscsi command structure */ static void bnx2i_destroy_cmd_pool(struct bnx2i_hba 
*hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; if (cmd->io_tbl.bd_tbl) dma_free_coherent(&hba->pcidev->dev, ISCSI_MAX_BDS_PER_CMD * sizeof(struct iscsi_bd), cmd->io_tbl.bd_tbl, cmd->io_tbl.bd_tbl_dma); } } /** * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session * @hba: adapter instance pointer * @session: iscsi session pointer */ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, struct iscsi_session *session) { int i; for (i = 0; i < session->cmds_max; i++) { struct iscsi_task *task = session->cmds[i]; struct bnx2i_cmd *cmd = task->dd_data; task->hdr = &cmd->hdr; task->hdr_max = sizeof(struct iscsi_hdr); if (bnx2i_alloc_bdt(hba, session, cmd)) goto free_bdts; } return 0; free_bdts: bnx2i_destroy_cmd_pool(hba, session); return -ENOMEM; } /** * bnx2i_setup_mp_bdt - allocate BD table resources * @hba: pointer to adapter structure * * Allocate memory for dummy buffer and associated BD * table to be used by middle path (MP) requests */ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) { int rc = 0; struct iscsi_bd *mp_bdt; u64 addr; hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->mp_bd_dma, GFP_KERNEL); if (!hba->mp_bd_tbl) { printk(KERN_ERR "unable to allocate Middle Path BDT\n"); rc = -1; goto out; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; rc = -1; goto out; } mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; addr = (unsigned long) hba->dummy_buf_dma; mp_bdt->buffer_addr_lo = addr & 0xffffffff; mp_bdt->buffer_addr_hi = addr >> 32; mp_bdt->buffer_length = PAGE_SIZE; mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; out: return rc; } 
/** * bnx2i_free_mp_bdt - releases ITT back to free pool * @hba: pointer to adapter instance * * free MP dummy buffer and associated BD table */ static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) { if (hba->mp_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; } if (hba->dummy_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } return; } /** * bnx2i_drop_session - notifies iscsid of connection error. * @hba: adapter instance pointer * @session: iscsi session pointer * * This notifies iscsid that there is a error, so it can initiate * recovery. * * This relies on caller using the iscsi class iterator so the object * is refcounted and does not disapper from under us. */ void bnx2i_drop_session(struct iscsi_cls_session *cls_session) { iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } /** * bnx2i_ep_destroy_list_add - add an entry to EP destroy list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport indentifier) structure * * EP destroy queue manager */ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_destroy_list); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_destroy_list_del - add an entry to EP destroy list * * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport indentifier) structure * * EP destroy queue manager */ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport indentifier) structure * * pending conn offload completion queue manager */ static int 
bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_add_tail(&ep->link, &hba->ep_ofld_list); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list * @hba: pointer to adapter instance * @ep: pointer to endpoint (transport indentifier) structure * * pending conn offload completion queue manager */ static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { write_lock_bh(&hba->ep_rdwr_lock); list_del_init(&ep->link); write_unlock_bh(&hba->ep_rdwr_lock); return 0; } /** * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints * * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_ofld_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); return ep; } /** * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list * @hba: pointer to adapter instance * @iscsi_cid: iscsi context ID to find * */ struct bnx2i_endpoint * bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) { struct list_head *list; struct list_head *tmp; struct bnx2i_endpoint *ep; read_lock_bh(&hba->ep_rdwr_lock); list_for_each_safe(list, tmp, &hba->ep_destroy_list) { ep = (struct bnx2i_endpoint *)list; if (ep->ep_iscsi_cid == iscsi_cid) break; ep = NULL; } read_unlock_bh(&hba->ep_rdwr_lock); if (!ep) printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); return ep; } /** * bnx2i_setup_host_queue_size - assigns shost->can_queue param * @hba: pointer to adapter instance * @shost: scsi host pointer * * Initializes 
'can_queue' parameter based on how many outstanding commands * the device can handle. Each device 5708/5709/57710 has different * capabilities */ static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, struct Scsi_Host *shost) { if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709; else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710; else shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; } /** * bnx2i_alloc_hba - allocate and init adapter instance * @cnic: cnic device pointer * * allocate & initialize adapter structure and call other * support routines to do per adapter initialization */ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) { struct Scsi_Host *shost; struct bnx2i_hba *hba; shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); if (!shost) return NULL; shost->dma_boundary = cnic->pcidev->dma_mask; shost->transportt = bnx2i_scsi_xport_template; shost->max_id = ISCSI_MAX_CONNS_PER_HBA; shost->max_channel = 0; shost->max_lun = 512; shost->max_cmd_len = 16; hba = iscsi_host_priv(shost); hba->shost = shost; hba->netdev = cnic->netdev; /* Get PCI related information and update hba struct members */ hba->pcidev = cnic->pcidev; pci_dev_get(hba->pcidev); hba->pci_did = hba->pcidev->device; hba->pci_vid = hba->pcidev->vendor; hba->pci_sdid = hba->pcidev->subsystem_device; hba->pci_svid = hba->pcidev->subsystem_vendor; hba->pci_func = PCI_FUNC(hba->pcidev->devfn); hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); bnx2i_identify_device(hba); bnx2i_setup_host_queue_size(hba, shost); if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { hba->regview = ioremap_nocache(hba->netdev->base_addr, BNX2_MQ_CONFIG2); if (!hba->regview) goto ioreg_map_err; } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { hba->regview = 
ioremap_nocache(hba->netdev->base_addr, 4096); if (!hba->regview) goto ioreg_map_err; } if (bnx2i_setup_mp_bdt(hba)) goto mp_bdt_mem_err; INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_destroy_list); rwlock_init(&hba->ep_rdwr_lock); hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; /* different values for 5708/5709/57710 */ hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; if (bnx2i_setup_free_cid_que(hba)) goto cid_que_err; /* SQ/RQ/CQ size can be changed via sysfx interface */ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; } else { /* 5706/5708/5709 */ if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) hba->max_sqes = sq_size; else hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; } hba->max_rqes = rq_size; hba->max_cqes = hba->max_sqes + rq_size; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; hba->num_ccell = hba->max_sqes / 2; spin_lock_init(&hba->lock); mutex_init(&hba->net_dev_lock); init_waitqueue_head(&hba->eh_wait); if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) hba->hba_shutdown_tmo = 240 * HZ; else /* 5706/5708/5709 */ hba->hba_shutdown_tmo = 30 * HZ; if (iscsi_host_add(shost, &hba->pcidev->dev)) goto free_dump_mem; return hba; free_dump_mem: bnx2i_release_free_cid_que(hba); cid_que_err: bnx2i_free_mp_bdt(hba); mp_bdt_mem_err: if (hba->regview) { iounmap(hba->regview); hba->regview = NULL; } ioreg_map_err: pci_dev_put(hba->pcidev); scsi_host_put(shost); return NULL; } /** * bnx2i_free_hba- releases hba structure and resources held by the adapter * @hba: pointer to adapter instance * * free adapter structure and call various cleanup routines. 
*/ void bnx2i_free_hba(struct bnx2i_hba *hba) { struct Scsi_Host *shost = hba->shost; iscsi_host_remove(shost); INIT_LIST_HEAD(&hba->ep_ofld_list); INIT_LIST_HEAD(&hba->ep_destroy_list); pci_dev_put(hba->pcidev); if (hba->regview) { iounmap(hba->regview); hba->regview = NULL; } bnx2i_free_mp_bdt(hba); bnx2i_release_free_cid_que(hba); iscsi_host_free(shost); } /** * bnx2i_conn_free_login_resources - free DMA resources used for login process * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Login related resources, mostly BDT & payload DMA memory is freed */ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { if (bnx2i_conn->gen_pdu.resp_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, bnx2i_conn->gen_pdu.resp_bd_tbl, bnx2i_conn->gen_pdu.resp_bd_dma); bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.req_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.resp_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; } if (bnx2i_conn->gen_pdu.req_buf) { dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; } } /** * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop. * @hba: pointer to adapter instance * @bnx2i_conn: iscsi connection pointer * * Mgmt task DNA resources are allocated in this routine. 
*/ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { /* Allocate memory for login request/response buffers */ bnx2i_conn->gen_pdu.req_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.req_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_buf == NULL) goto login_req_buf_failure; bnx2i_conn->gen_pdu.req_buf_size = 0; bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; bnx2i_conn->gen_pdu.resp_buf = dma_alloc_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, &bnx2i_conn->gen_pdu.resp_dma_addr, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_buf == NULL) goto login_resp_buf_failure; bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; bnx2i_conn->gen_pdu.req_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) goto login_req_bd_tbl_failure; bnx2i_conn->gen_pdu.resp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &bnx2i_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) goto login_resp_bd_tbl_failure; return 0; login_resp_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; login_req_bd_tbl_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_dma_addr); bnx2i_conn->gen_pdu.resp_buf = NULL; login_resp_buf_failure: dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, bnx2i_conn->gen_pdu.req_buf, bnx2i_conn->gen_pdu.req_dma_addr); bnx2i_conn->gen_pdu.req_buf = NULL; login_req_buf_failure: iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, "login resource alloc failed!!\n"); return -ENOMEM; } /** * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table. 
* @bnx2i_conn: iscsi connection pointer * * Allocates buffers and BD tables before shipping requests to cnic * for PDUs prepared by 'iscsid' daemon */ static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn) { struct iscsi_bd *bd_tbl; bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl; bd_tbl->buffer_addr_hi = (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32); bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr; bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr - bnx2i_conn->gen_pdu.req_buf; bd_tbl->reserved0 = 0; bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl; bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32; bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr; bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN; bd_tbl->reserved0 = 0; bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; } /** * bnx2i_iscsi_send_generic_request - called to send mgmt tasks. * @task: transport layer task pointer * * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login, * Nop-out and Logout requests flow through this path. 
*/ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) { struct bnx2i_cmd *cmd = task->dd_data; struct bnx2i_conn *bnx2i_conn = cmd->conn; int rc = 0; char *buf; int data_len; bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: bnx2i_send_iscsi_login(bnx2i_conn, task); break; case ISCSI_OP_NOOP_OUT: data_len = bnx2i_conn->gen_pdu.req_buf_size; buf = bnx2i_conn->gen_pdu.req_buf; if (data_len) rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, RESERVED_ITT, buf, data_len, 1); else rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, RESERVED_ITT, NULL, 0, 1); break; case ISCSI_OP_LOGOUT: rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); break; case ISCSI_OP_SCSI_TMFUNC: rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); break; default: iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "send_gen: unsupported op 0x%x\n", task->hdr->opcode); } return rc; } /********************************************************************** * SCSI-ML Interface **********************************************************************/ /** * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe * @sc: SCSI-ML command pointer * @cmd: iscsi cmd pointer */ static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) { u32 dword; int lpcnt; u8 *srcp; u32 *dstp; u32 scsi_lun[2]; int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); srcp = (u8 *) sc->cmnd; dstp = (u32 *) cmd->req.cdb; while (lpcnt--) { memcpy(&dword, (const void *) srcp, 4); *dstp = cpu_to_be32(dword); srcp += 4; dstp++; } if (sc->cmd_len & 0x3) { dword = (u32) srcp[0] | ((u32) srcp[1] << 8); *dstp = cpu_to_be32(dword); } } static void bnx2i_cleanup_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct 
bnx2i_hba *hba = bnx2i_conn->hba; /* * mgmt task or cmd was never sent to us to transmit. */ if (!task->sc || task->state == ISCSI_TASK_PENDING) return; /* * need to clean-up task context to claim dma buffers */ if (task->state == ISCSI_TASK_ABRT_TMF) { bnx2i_send_cmd_cleanup_req(hba, task->dd_data); spin_unlock_bh(&conn->session->lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); spin_lock_bh(&conn->session->lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); } /** * bnx2i_mtask_xmit - transmit mtask to chip for further processing * @conn: transport layer conn structure pointer * @task: transport layer command structure pointer */ static int bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct bnx2i_cmd *cmd = task->dd_data; memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); bnx2i_setup_cmd_wqe_template(cmd); bnx2i_conn->gen_pdu.req_buf_size = task->data_count; if (task->data_count) { memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, task->data_count); bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf + task->data_count; } cmd->conn = conn->dd_data; cmd->scsi_cmd = NULL; return bnx2i_iscsi_send_generic_request(task); } /** * bnx2i_task_xmit - transmit iscsi command to chip for further processing * @task: transport layer command structure pointer * * maps SG buffers and send request to chip/firmware in the form of SQ WQE */ static int bnx2i_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct scsi_cmnd *sc = task->sc; struct bnx2i_cmd *cmd = task->dd_data; struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; /* * If there is no scsi_cmnd this must be a mgmt task */ if 
(!sc) return bnx2i_mtask_xmit(conn, task); bnx2i_setup_cmd_wqe_template(cmd); cmd->req.op_code = ISCSI_OP_SCSI_CMD; cmd->conn = bnx2i_conn; cmd->scsi_cmd = sc; cmd->req.total_data_transfer_length = scsi_bufflen(sc); cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); bnx2i_iscsi_map_sg_list(cmd); bnx2i_cpy_scsi_cdb(sc, cmd); cmd->req.op_attr = ISCSI_ATTR_SIMPLE; if (sc->sc_data_direction == DMA_TO_DEVICE) { cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); bnx2i_setup_write_cmd_bd_info(task); } else { if (scsi_bufflen(sc)) cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; cmd->req.itt = task->itt | (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); } cmd->req.num_bds = cmd->io_tbl.bd_valid; if (!cmd->io_tbl.bd_valid) { cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); cmd->req.num_bds = 1; } bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); return 0; } /** * bnx2i_session_create - create a new iscsi session * @cmds_max: max commands supported * @qdepth: scsi queue depth to support * @initial_cmdsn: initial iscsi CMDSN to be used for this session * * Creates a new iSCSI session instance on given device. */ static struct iscsi_cls_session * bnx2i_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, uint16_t qdepth, uint32_t initial_cmdsn) { struct Scsi_Host *shost; struct iscsi_cls_session *cls_session; struct bnx2i_hba *hba; struct bnx2i_endpoint *bnx2i_ep; if (!ep) { printk(KERN_ERR "bnx2i: missing ep.\n"); return NULL; } bnx2i_ep = ep->dd_data; shost = bnx2i_ep->hba->shost; hba = iscsi_host_priv(shost); if (bnx2i_adapter_ready(hba)) return NULL; /* * user can override hw limit as long as it is within * the min/max. 
*/ if (cmds_max > hba->max_sqes) cmds_max = hba->max_sqes; else if (cmds_max < BNX2I_SQ_WQES_MIN) cmds_max = BNX2I_SQ_WQES_MIN; cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, cmds_max, 0, sizeof(struct bnx2i_cmd), initial_cmdsn, ISCSI_MAX_TARGET); if (!cls_session) return NULL; if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) goto session_teardown; return cls_session; session_teardown: iscsi_session_teardown(cls_session); return NULL; } /** * bnx2i_session_destroy - destroys iscsi session * @cls_session: pointer to iscsi cls session * * Destroys previously created iSCSI session instance and releases * all resources held by it */ static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); bnx2i_destroy_cmd_pool(hba, session); iscsi_session_teardown(cls_session); } /** * bnx2i_conn_create - create iscsi connection instance * @cls_session: pointer to iscsi cls session * @cid: iscsi cid as per rfc (not NX2's CID terminology) * * Creates a new iSCSI connection instance for a given session */ static struct iscsi_cls_conn * bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_conn *bnx2i_conn; struct iscsi_cls_conn *cls_conn; struct iscsi_conn *conn; cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), cid); if (!cls_conn) return NULL; conn = cls_conn->dd_data; bnx2i_conn = conn->dd_data; bnx2i_conn->cls_conn = cls_conn; bnx2i_conn->hba = hba; /* 'ep' ptr will be assigned in bind() call */ bnx2i_conn->ep = NULL; init_completion(&bnx2i_conn->cmd_cleanup_cmpl); if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { iscsi_conn_printk(KERN_ALERT, conn, "conn_new: login resc alloc failed!!\n"); goto free_conn; } return 
cls_conn; free_conn: iscsi_conn_teardown(cls_conn); return NULL; } /** * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together * @cls_session: pointer to iscsi cls session * @cls_conn: pointer to iscsi cls conn * @transport_fd: 64-bit EP handle * @is_leading: leading connection on this session? * * Binds together iSCSI session instance, iSCSI connection instance * and the TCP connection. This routine returns error code if * TCP connection does not belong on the device iSCSI sess/conn * is bound */ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct bnx2i_hba *hba = iscsi_host_priv(shost); struct bnx2i_endpoint *bnx2i_ep; struct iscsi_endpoint *ep; int ret_code; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) return -EINVAL; bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) /* Peer disconnect via' FIN or RST */ return -EINVAL; if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; if (bnx2i_ep->hba != hba) { /* Error - TCP connection does not belong to this device */ iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "conn bind, ep=0x%p (%s) does not", bnx2i_ep, bnx2i_ep->hba->netdev->name); iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "belong to hba (%s)\n", hba->netdev->name); return -EEXIST; } bnx2i_ep->conn = bnx2i_conn; bnx2i_conn->ep = bnx2i_ep; bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, bnx2i_ep->ep_iscsi_cid); /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 * driver needs to explicitly replenish RQ index during setup. 
*/ if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) bnx2i_put_rq_buf(bnx2i_conn, 0); bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); return ret_code; } /** * bnx2i_conn_destroy - destroy iscsi connection instance & release resources * @cls_conn: pointer to iscsi cls conn * * Destroy an iSCSI connection instance and release memory resources held by * this connection */ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; struct Scsi_Host *shost; struct bnx2i_hba *hba; shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); hba = iscsi_host_priv(shost); bnx2i_conn_free_login_resources(hba, bnx2i_conn); iscsi_conn_teardown(cls_conn); } /** * bnx2i_conn_get_param - return iscsi connection parameter to caller * @cls_conn: pointer to iscsi cls conn * @param: parameter type identifier * @buf: buffer pointer * * returns iSCSI connection parameters */ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; int len = 0; switch (param) { case ISCSI_PARAM_CONN_PORT: if (bnx2i_conn->ep) len = sprintf(buf, "%hu\n", bnx2i_conn->ep->cm_sk->dst_port); break; case ISCSI_PARAM_CONN_ADDRESS: if (bnx2i_conn->ep) len = sprintf(buf, "%pI4\n", &bnx2i_conn->ep->cm_sk->dst_ip); break; default: return iscsi_conn_get_param(cls_conn, param, buf); } return len; } /** * bnx2i_host_get_param - returns host (adapter) related parameters * @shost: scsi host pointer * @param: parameter type identifier * @buf: buffer pointer */ static int bnx2i_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct bnx2i_hba *hba = iscsi_host_priv(shost); int len = 0; switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); break; case 
ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", hba->netdev->name); break; default: return iscsi_host_get_param(shost, param, buf); } return len; } /** * bnx2i_conn_start - completes iscsi connection migration to FFP * @cls_conn: pointer to iscsi cls conn * * last call in FFP migration to handover iscsi conn to the driver */ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct bnx2i_conn *bnx2i_conn = conn->dd_data; bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; bnx2i_update_iscsi_conn(conn); /* * this should normally not sleep for a long time so it should * not disrupt the caller. */ bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; add_timer(&bnx2i_conn->ep->ofld_timer); /* update iSCSI context for this conn, wait for CNIC to complete */ wait_event_interruptible(bnx2i_conn->ep->ofld_wait, bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_conn->ep->ofld_timer); iscsi_conn_start(cls_conn); return 0; } /** * bnx2i_conn_get_stats - returns iSCSI stats * @cls_conn: pointer to iscsi cls conn * @stats: pointer to iscsi statistic struct */ static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; stats->txdata_octets = conn->txdata_octets; stats->rxdata_octets = conn->rxdata_octets; stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; stats->dataout_pdus = conn->dataout_pdus_cnt; stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; stats->datain_pdus = conn->datain_pdus_cnt; stats->r2t_pdus = conn->r2t_pdus_cnt; stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; stats->custom_length = 3; strcpy(stats->custom[2].desc, "eh_abort_cnt"); stats->custom[2].value = conn->eh_abort_cnt; 
stats->digest_err = 0; stats->timeout_err = 0; stats->custom_length = 0; } /** * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices * @dst_addr: target IP address * * check if route resolves to BNX2 device */ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) { struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; struct bnx2i_hba *hba; struct cnic_dev *cnic = NULL; bnx2i_reg_dev_all(); hba = get_adapter_list_head(); if (hba && hba->cnic) cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); if (!cnic) { printk(KERN_ALERT "bnx2i: no route," "can't connect using cnic\n"); goto no_nx2_route; } hba = bnx2i_find_hba_for_cnic(cnic); if (!hba) goto no_nx2_route; if (bnx2i_adapter_ready(hba)) { printk(KERN_ALERT "bnx2i: check route, hba not found\n"); goto no_nx2_route; } if (hba->netdev->mtu > hba->mtu_supported) { printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", hba->netdev->name, hba->netdev->mtu); printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", hba->mtu_supported); goto no_nx2_route; } return hba; no_nx2_route: return NULL; } /** * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources * @hba: pointer to adapter instance * @ep: endpoint (transport indentifier) structure * * destroys cm_sock structure and on chip iscsi context */ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) { if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->cm_destroy(ep->cm_sk); if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) ep->state = EP_STATE_DISCONN_COMPL; if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && ep->state == EP_STATE_DISCONN_TIMEDOUT) { printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump," " NW/PCIe trace, driver msgs to developers" " for analysis\n"); return 1; } ep->state = EP_STATE_CLEANUP_START; init_timer(&ep->ofld_timer); ep->ofld_timer.expires = 10*HZ + jiffies; ep->ofld_timer.function = 
bnx2i_ep_ofld_timer; ep->ofld_timer.data = (unsigned long) ep; add_timer(&ep->ofld_timer); bnx2i_ep_destroy_list_add(hba, ep); /* destroy iSCSI context, wait for it to complete */ bnx2i_send_conn_destroy(hba, ep); wait_event_interruptible(ep->ofld_wait, (ep->state != EP_STATE_CLEANUP_START)); if (signal_pending(current)) flush_signals(current); del_timer_sync(&ep->ofld_timer); bnx2i_ep_destroy_list_del(hba, ep); if (ep->state != EP_STATE_CLEANUP_CMPL) /* should never happen */ printk(KERN_ALERT "bnx2i - conn destroy failed\n"); return 0; } /** * bnx2i_ep_connect - establish TCP connection to target portal * @shost: scsi host * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * this routine initiates the TCP/IP connection by invoking Option-2 i/f * with l5_core and the CNIC. This is a multi-step process of resolving * route to target, create a iscsi connection context, handshaking with * CNIC module to create/initialize the socket struct and finally * sending down option-2 request to complete TCP 3-way handshake */ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) { u32 iscsi_cid = BNX2I_CID_RESERVED; struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; struct sockaddr_in6 *desti6; struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_hba *hba; struct cnic_dev *cnic; struct cnic_sockaddr saddr; struct iscsi_endpoint *ep; int rc = 0; if (shost) { /* driver is given scsi host to work with */ hba = iscsi_host_priv(shost); /* Register the device with cnic if not already done so */ bnx2i_register_device(hba); } else /* * check if the given destination can be reached through * a iscsi capable NetXtreme2 device */ hba = bnx2i_check_route(dst_addr); if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { rc = -EINVAL; goto check_busy; } cnic = hba->cnic; ep = bnx2i_alloc_ep(hba); if (!ep) { rc = -ENOMEM; goto check_busy; } bnx2i_ep = ep->dd_data; 
mutex_lock(&hba->net_dev_lock); if (bnx2i_adapter_ready(hba)) { rc = -EPERM; goto net_if_down; } bnx2i_ep->num_active_cmds = 0; iscsi_cid = bnx2i_alloc_iscsi_cid(hba); if (iscsi_cid == -1) { printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); rc = -ENOMEM; goto iscsi_cid_err; } bnx2i_ep->hba_age = hba->age; rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); if (rc != 0) { printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); rc = -ENOMEM; goto qp_resc_err; } bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; bnx2i_ep->state = EP_STATE_OFLD_START; bnx2i_ep_ofld_list_add(hba, bnx2i_ep); init_timer(&bnx2i_ep->ofld_timer); bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); bnx2i_send_conn_ofld_req(hba, bnx2i_ep); /* Wait for CNIC hardware to setup conn context and return 'cid' */ wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_OFLD_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); bnx2i_ep_ofld_list_del(hba, bnx2i_ep); if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { rc = -ENOSPC; goto conn_failed; } rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); if (rc) { rc = -EINVAL; goto conn_failed; } bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; bnx2i_ep->cm_sk->snd_buf = 256 * 1024; clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); memset(&saddr, 0, sizeof(saddr)); if (dst_addr->sa_family == AF_INET) { desti = (struct sockaddr_in *) dst_addr; saddr.remote.v4 = *desti; saddr.local.v4.sin_family = desti->sin_family; } else if (dst_addr->sa_family == AF_INET6) { desti6 = (struct sockaddr_in6 *) dst_addr; saddr.remote.v6 = *desti6; saddr.local.v6.sin6_family = desti6->sin6_family; } bnx2i_ep->timestamp = jiffies; bnx2i_ep->state = EP_STATE_CONNECT_START; if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { rc = 
-EINVAL; goto conn_failed; } else rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); if (rc) goto release_ep; if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) goto release_ep; mutex_unlock(&hba->net_dev_lock); return ep; release_ep: if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return ERR_PTR(rc); } conn_failed: net_if_down: iscsi_cid_err: bnx2i_free_qp_resc(hba, bnx2i_ep); qp_resc_err: bnx2i_free_ep(ep); mutex_unlock(&hba->net_dev_lock); check_busy: bnx2i_unreg_dev_all(); return ERR_PTR(rc); } /** * bnx2i_ep_poll - polls for TCP connection establishement * @ep: TCP connection (endpoint) handle * @timeout_ms: timeout value in milli secs * * polls for TCP connect request to complete */ static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct bnx2i_endpoint *bnx2i_ep; int rc = 0; bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_IDLE) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) return -1; if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) return 1; rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, ((bnx2i_ep->state == EP_STATE_OFLD_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)), msecs_to_jiffies(timeout_ms)); if (bnx2i_ep->state == EP_STATE_OFLD_FAILED) rc = -1; if (rc > 0) return 1; else if (!rc) return 0; /* timeout */ else return rc; } /** * bnx2i_ep_tcp_conn_active - check EP state transition * @ep: endpoint pointer * * check if underlying TCP connection is active */ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) { int ret; int cnic_dev_10g = 0; if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) cnic_dev_10g = 1; switch (bnx2i_ep->state) { case EP_STATE_CONNECT_START: case EP_STATE_CLEANUP_FAILED: case EP_STATE_OFLD_FAILED: case EP_STATE_DISCONN_TIMEDOUT: ret = 0; break; case EP_STATE_CONNECT_COMPL: case EP_STATE_ULP_UPDATE_START: case EP_STATE_ULP_UPDATE_COMPL: case 
EP_STATE_TCP_FIN_RCVD: case EP_STATE_ULP_UPDATE_FAILED: ret = 1; break; case EP_STATE_TCP_RST_RCVD: ret = 0; break; case EP_STATE_CONNECT_FAILED: if (cnic_dev_10g) ret = 1; else ret = 0; break; default: ret = 0; } return ret; } /** * bnx2i_ep_disconnect - executes TCP connection teardown process * @ep: TCP connection (endpoint) handle * * executes TCP connection teardown process */ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) { struct bnx2i_endpoint *bnx2i_ep; struct bnx2i_conn *bnx2i_conn = NULL; struct iscsi_session *session = NULL; struct iscsi_conn *conn; struct cnic_dev *cnic; struct bnx2i_hba *hba; bnx2i_ep = ep->dd_data; /* driver should not attempt connection cleanup until TCP_CONNECT * completes either successfully or fails. Timeout is 9-secs, so * wait for it to complete */ while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) msleep(250); if (bnx2i_ep->conn) { bnx2i_conn = bnx2i_ep->conn; conn = bnx2i_conn->cls_conn->dd_data; session = conn->session; iscsi_suspend_queue(conn); } hba = bnx2i_ep->hba; if (bnx2i_ep->state == EP_STATE_IDLE) goto return_bnx2i_ep; cnic = hba->cnic; mutex_lock(&hba->net_dev_lock); if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) goto free_resc; if (bnx2i_ep->hba_age != hba->age) goto free_resc; if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) goto destory_conn; bnx2i_ep->state = EP_STATE_DISCONN_START; init_timer(&bnx2i_ep->ofld_timer); bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies; bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { int close = 0; if (session) { spin_lock_bh(&session->lock); if (session->state == ISCSI_STATE_LOGGING_OUT) close = 1; spin_unlock_bh(&session->lock); } if (close) cnic->cm_close(bnx2i_ep->cm_sk); else cnic->cm_abort(bnx2i_ep->cm_sk); } else goto free_resc; /* wait for option-2 conn 
teardown */ wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_DISCONN_START); if (signal_pending(current)) flush_signals(current); del_timer_sync(&bnx2i_ep->ofld_timer); destory_conn: if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { mutex_unlock(&hba->net_dev_lock); return; } free_resc: mutex_unlock(&hba->net_dev_lock); bnx2i_free_qp_resc(hba, bnx2i_ep); return_bnx2i_ep: if (bnx2i_conn) bnx2i_conn->ep = NULL; bnx2i_free_ep(ep); if (!hba->ofld_conns_active) bnx2i_unreg_dev_all(); wake_up_interruptible(&hba->eh_wait); } /** * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler * @buf: pointer to buffer containing iscsi path message * */ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) { struct bnx2i_hba *hba = iscsi_host_priv(shost); char *buf = (char *) params; u16 len = sizeof(*params); /* handled by cnic driver */ hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, len); return 0; } /* * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template * used while registering with the scsi host and iSCSI transport module. 
*/ static struct scsi_host_template bnx2i_host_template = { .module = THIS_MODULE, .name = "Broadcom Offload iSCSI Initiator", .proc_name = "bnx2i", .queuecommand = iscsi_queuecommand, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .change_queue_depth = iscsi_change_queue_depth, .can_queue = 1024, .max_sectors = 127, .cmd_per_lun = 32, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, .shost_attrs = bnx2i_dev_attributes, }; struct iscsi_transport bnx2i_iscsi_transport = { .owner = THIS_MODULE, .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | ISCSI_PERSISTENT_ADDRESS | ISCSI_TARGET_NAME | ISCSI_TPGT | ISCSI_USERNAME | ISCSI_PASSWORD | ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | ISCSI_PING_TMO | ISCSI_RECV_TMO | ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, .create_session = bnx2i_session_create, .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, .destroy_conn = bnx2i_conn_destroy, .set_param = iscsi_set_param, .get_conn_param = bnx2i_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = bnx2i_host_get_param, .start_conn = bnx2i_conn_start, .stop_conn = iscsi_conn_stop, .send_pdu = iscsi_conn_send_pdu, .xmit_task = bnx2i_task_xmit, .get_stats = bnx2i_conn_get_stats, /* TCP connect - disconnect - option-2 interface calls */ 
.ep_connect = bnx2i_ep_connect, .ep_poll = bnx2i_ep_poll, .ep_disconnect = bnx2i_ep_disconnect, .set_path = bnx2i_nl_set_path, /* Error recovery timeout call */ .session_recovery_timedout = iscsi_session_recovery_timedout, .cleanup_task = bnx2i_cleanup_task, };
gpl-2.0
drewgaren/linux_samsung_ics
fs/nfs/dns_resolve.c
803
7544
/* * linux/fs/nfs/dns_resolve.c * * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com> * * Resolves DNS hostnames into valid ip addresses */ #include <linux/hash.h> #include <linux/string.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/seq_file.h> #include <linux/inet.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/svcauth.h> #include "dns_resolve.h" #include "cache_lib.h" #define NFS_DNS_HASHBITS 4 #define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS) static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE]; struct nfs_dns_ent { struct cache_head h; char *hostname; size_t namelen; struct sockaddr_storage addr; size_t addrlen; }; static void nfs_dns_ent_update(struct cache_head *cnew, struct cache_head *ckey) { struct nfs_dns_ent *new; struct nfs_dns_ent *key; new = container_of(cnew, struct nfs_dns_ent, h); key = container_of(ckey, struct nfs_dns_ent, h); memcpy(&new->addr, &key->addr, key->addrlen); new->addrlen = key->addrlen; } static void nfs_dns_ent_init(struct cache_head *cnew, struct cache_head *ckey) { struct nfs_dns_ent *new; struct nfs_dns_ent *key; new = container_of(cnew, struct nfs_dns_ent, h); key = container_of(ckey, struct nfs_dns_ent, h); kfree(new->hostname); new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL); if (new->hostname) { new->namelen = key->namelen; nfs_dns_ent_update(cnew, ckey); } else { new->namelen = 0; new->addrlen = 0; } } static void nfs_dns_ent_put(struct kref *ref) { struct nfs_dns_ent *item; item = container_of(ref, struct nfs_dns_ent, h.ref); kfree(item->hostname); kfree(item); } static struct cache_head *nfs_dns_ent_alloc(void) { struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL); if (item != NULL) { item->hostname = NULL; item->namelen = 0; item->addrlen = 0; return &item->h; } return NULL; }; static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key) { return 
hash_str(key->hostname, NFS_DNS_HASHBITS); } static void nfs_dns_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); qword_add(bpp, blen, key->hostname); (*bpp)[-1] = '\n'; } static int nfs_dns_upcall(struct cache_detail *cd, struct cache_head *ch) { struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); int ret; ret = nfs_cache_upcall(cd, key->hostname); if (ret) ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request); return ret; } static int nfs_dns_match(struct cache_head *ca, struct cache_head *cb) { struct nfs_dns_ent *a; struct nfs_dns_ent *b; a = container_of(ca, struct nfs_dns_ent, h); b = container_of(cb, struct nfs_dns_ent, h); if (a->namelen == 0 || a->namelen != b->namelen) return 0; return memcmp(a->hostname, b->hostname, a->namelen) == 0; } static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct nfs_dns_ent *item; long ttl; if (h == NULL) { seq_puts(m, "# ip address hostname ttl\n"); return 0; } item = container_of(h, struct nfs_dns_ent, h); ttl = (long)item->h.expiry_time - (long)get_seconds(); if (ttl < 0) ttl = 0; if (!test_bit(CACHE_NEGATIVE, &h->flags)) { char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1]; rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf)); seq_printf(m, "%15s ", buf); } else seq_puts(m, "<none> "); seq_printf(m, "%15s %ld\n", item->hostname, ttl); return 0; } static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd, struct nfs_dns_ent *key) { struct cache_head *ch; ch = sunrpc_cache_lookup(cd, &key->h, nfs_dns_hash(key)); if (!ch) return NULL; return container_of(ch, struct nfs_dns_ent, h); } static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd, struct nfs_dns_ent *new, struct nfs_dns_ent *key) { struct cache_head *ch; ch = sunrpc_cache_update(cd, &new->h, &key->h, nfs_dns_hash(key)); if (!ch) return NULL; return container_of(ch, struct nfs_dns_ent, h); 
} static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen) { char buf1[NFS_DNS_HOSTNAME_MAXLEN+1]; struct nfs_dns_ent key, *item; unsigned long ttl; ssize_t len; int ret = -EINVAL; if (buf[buflen-1] != '\n') goto out; buf[buflen-1] = '\0'; len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; key.addrlen = rpc_pton(buf1, len, (struct sockaddr *)&key.addr, sizeof(key.addr)); len = qword_get(&buf, buf1, sizeof(buf1)); if (len <= 0) goto out; key.hostname = buf1; key.namelen = len; memset(&key.h, 0, sizeof(key.h)); ttl = get_expiry(&buf); if (ttl == 0) goto out; key.h.expiry_time = ttl + get_seconds(); ret = -ENOMEM; item = nfs_dns_lookup(cd, &key); if (item == NULL) goto out; if (key.addrlen == 0) set_bit(CACHE_NEGATIVE, &key.h.flags); item = nfs_dns_update(cd, &key, item); if (item == NULL) goto out; ret = 0; cache_put(&item->h, cd); out: return ret; } static struct cache_detail nfs_dns_resolve = { .owner = THIS_MODULE, .hash_size = NFS_DNS_HASHTBL_SIZE, .hash_table = nfs_dns_table, .name = "dns_resolve", .cache_put = nfs_dns_ent_put, .cache_upcall = nfs_dns_upcall, .cache_parse = nfs_dns_parse, .cache_show = nfs_dns_show, .match = nfs_dns_match, .init = nfs_dns_ent_init, .update = nfs_dns_ent_update, .alloc = nfs_dns_ent_alloc, }; static int do_cache_lookup(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item, struct nfs_cache_defer_req *dreq) { int ret = -ENOMEM; *item = nfs_dns_lookup(cd, key); if (*item) { ret = cache_check(cd, &(*item)->h, &dreq->req); if (ret) *item = NULL; } return ret; } static int do_cache_lookup_nowait(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item) { int ret = -ENOMEM; *item = nfs_dns_lookup(cd, key); if (!*item) goto out_err; ret = -ETIMEDOUT; if (!test_bit(CACHE_VALID, &(*item)->h.flags) || (*item)->h.expiry_time < get_seconds() || cd->flush_time > (*item)->h.last_refresh) goto out_put; ret = -ENOENT; if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags)) goto 
out_put; return 0; out_put: cache_put(&(*item)->h, cd); out_err: *item = NULL; return ret; } static int do_cache_lookup_wait(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item) { struct nfs_cache_defer_req *dreq; int ret = -ENOMEM; dreq = nfs_cache_defer_req_alloc(); if (!dreq) goto out; ret = do_cache_lookup(cd, key, item, dreq); if (ret == -EAGAIN) { ret = nfs_cache_wait_for_upcall(dreq); if (!ret) ret = do_cache_lookup_nowait(cd, key, item); } nfs_cache_defer_req_put(dreq); out: return ret; } ssize_t nfs_dns_resolve_name(char *name, size_t namelen, struct sockaddr *sa, size_t salen) { struct nfs_dns_ent key = { .hostname = name, .namelen = namelen, }; struct nfs_dns_ent *item = NULL; ssize_t ret; ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item); if (ret == 0) { if (salen >= item->addrlen) { memcpy(sa, &item->addr, item->addrlen); ret = item->addrlen; } else ret = -EOVERFLOW; cache_put(&item->h, &nfs_dns_resolve); } else if (ret == -ENOENT) ret = -ESRCH; return ret; } int nfs_dns_resolver_init(void) { return nfs_cache_register(&nfs_dns_resolve); } void nfs_dns_resolver_destroy(void) { nfs_cache_unregister(&nfs_dns_resolve); }
gpl-2.0
Split-Screen/android_kernel_oneplus_msm8996
drivers/video/fbdev/pm3fb.c
1059
43352
/* * linux/drivers/video/pm3fb.c -- 3DLabs Permedia3 frame buffer device * * Copyright (C) 2001 Romain Dolbeau <romain@dolbeau.org>. * * Ported to 2.6 kernel on 1 May 2007 by Krzysztof Helt <krzysztof.h1@wp.pl> * based on pm2fb.c * * Based on code written by: * Sven Luther, <luther@dpt-info.u-strasbg.fr> * Alan Hourihane, <alanh@fairlite.demon.co.uk> * Russell King, <rmk@arm.linux.org.uk> * Based on linux/drivers/video/skeletonfb.c: * Copyright (C) 1997 Geert Uytterhoeven * Based on linux/driver/video/pm2fb.c: * Copyright (C) 1998-1999 Ilario Nardinocchi (nardinoc@CS.UniBO.IT) * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include <video/pm3fb.h> #if !defined(CONFIG_PCI) #error "Only generic PCI cards supported." #endif #undef PM3FB_MASTER_DEBUG #ifdef PM3FB_MASTER_DEBUG #define DPRINTK(a, b...) \ printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b) #else #define DPRINTK(a, b...) #endif #define PM3_PIXMAP_SIZE (2048 * 4) /* * Driver data */ static int hwcursor = 1; static char *mode_option; static bool noaccel; /* mtrr option */ #ifdef CONFIG_MTRR static bool nomtrr; #endif /* * This structure defines the hardware state of the graphics card. Normally * you place this in a header file in linux/include/video. This file usually * also includes register information. That allows other driver subsystems * and userland applications the ability to use the same header file to * avoid duplicate work and easy porting of software. 
*/ struct pm3_par { unsigned char __iomem *v_regs;/* virtual address of p_regs */ u32 video; /* video flags before blanking */ u32 base; /* screen base in 128 bits unit */ u32 palette[16]; int mtrr_handle; }; /* * Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo * if we don't use modedb. If we do use modedb see pm3fb_init how to use it * to get a fb_var_screeninfo. Otherwise define a default var as well. */ static struct fb_fix_screeninfo pm3fb_fix = { .id = "Permedia3", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 1, .ypanstep = 1, .ywrapstep = 0, .accel = FB_ACCEL_3DLABS_PERMEDIA3, }; /* * Utility functions */ static inline u32 PM3_READ_REG(struct pm3_par *par, s32 off) { return fb_readl(par->v_regs + off); } static inline void PM3_WRITE_REG(struct pm3_par *par, s32 off, u32 v) { fb_writel(v, par->v_regs + off); } static inline void PM3_WAIT(struct pm3_par *par, u32 n) { while (PM3_READ_REG(par, PM3InFIFOSpace) < n) cpu_relax(); } static inline void PM3_WRITE_DAC_REG(struct pm3_par *par, unsigned r, u8 v) { PM3_WAIT(par, 3); PM3_WRITE_REG(par, PM3RD_IndexHigh, (r >> 8) & 0xff); PM3_WRITE_REG(par, PM3RD_IndexLow, r & 0xff); wmb(); PM3_WRITE_REG(par, PM3RD_IndexedData, v); wmb(); } static inline void pm3fb_set_color(struct pm3_par *par, unsigned char regno, unsigned char r, unsigned char g, unsigned char b) { PM3_WAIT(par, 4); PM3_WRITE_REG(par, PM3RD_PaletteWriteAddress, regno); wmb(); PM3_WRITE_REG(par, PM3RD_PaletteData, r); wmb(); PM3_WRITE_REG(par, PM3RD_PaletteData, g); wmb(); PM3_WRITE_REG(par, PM3RD_PaletteData, b); wmb(); } static void pm3fb_clear_colormap(struct pm3_par *par, unsigned char r, unsigned char g, unsigned char b) { int i; for (i = 0; i < 256 ; i++) pm3fb_set_color(par, i, r, g, b); } /* Calculating various clock parameters */ static void pm3fb_calculate_clock(unsigned long reqclock, unsigned char *prescale, unsigned char *feedback, unsigned char *postscale) { int f, pre, post; unsigned 
long freq; long freqerr = 1000; long currerr; for (f = 1; f < 256; f++) { for (pre = 1; pre < 256; pre++) { for (post = 0; post < 5; post++) { freq = ((2*PM3_REF_CLOCK * f) >> post) / pre; currerr = (reqclock > freq) ? reqclock - freq : freq - reqclock; if (currerr < freqerr) { freqerr = currerr; *feedback = f; *prescale = pre; *postscale = post; } } } } } static inline int pm3fb_depth(const struct fb_var_screeninfo *var) { if (var->bits_per_pixel == 16) return var->red.length + var->green.length + var->blue.length; return var->bits_per_pixel; } static inline int pm3fb_shift_bpp(unsigned bpp, int v) { switch (bpp) { case 8: return (v >> 4); case 16: return (v >> 3); case 32: return (v >> 2); } DPRINTK("Unsupported depth %u\n", bpp); return 0; } /* acceleration */ static int pm3fb_sync(struct fb_info *info) { struct pm3_par *par = info->par; PM3_WAIT(par, 2); PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync); PM3_WRITE_REG(par, PM3Sync, 0); mb(); do { while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0) cpu_relax(); } while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag); return 0; } static void pm3fb_init_engine(struct fb_info *info) { struct pm3_par *par = info->par; const u32 width = (info->var.xres_virtual + 7) & ~7; PM3_WAIT(par, 50); PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync); PM3_WRITE_REG(par, PM3StatisticMode, 0x0); PM3_WRITE_REG(par, PM3DeltaMode, 0x0); PM3_WRITE_REG(par, PM3RasterizerMode, 0x0); PM3_WRITE_REG(par, PM3ScissorMode, 0x0); PM3_WRITE_REG(par, PM3LineStippleMode, 0x0); PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0); PM3_WRITE_REG(par, PM3GIDMode, 0x0); PM3_WRITE_REG(par, PM3DepthMode, 0x0); PM3_WRITE_REG(par, PM3StencilMode, 0x0); PM3_WRITE_REG(par, PM3StencilData, 0x0); PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0); PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0); PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0); PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0); PM3_WRITE_REG(par, PM3TextureReadMode, 0x0); PM3_WRITE_REG(par, PM3LUTMode, 0x0); 
PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0); PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0); PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0); PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0); PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0); PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0); PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0); PM3_WRITE_REG(par, PM3FogMode, 0x0); PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0); PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0); PM3_WRITE_REG(par, PM3AntialiasMode, 0x0); PM3_WRITE_REG(par, PM3YUVMode, 0x0); PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0); PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0); PM3_WRITE_REG(par, PM3DitherMode, 0x0); PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0); PM3_WRITE_REG(par, PM3RouterMode, 0x0); PM3_WRITE_REG(par, PM3Window, 0x0); PM3_WRITE_REG(par, PM3Config2D, 0x0); PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff); PM3_WRITE_REG(par, PM3XBias, 0x0); PM3_WRITE_REG(par, PM3YBias, 0x0); PM3_WRITE_REG(par, PM3DeltaControl, 0x0); PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff); PM3_WRITE_REG(par, PM3FBDestReadEnables, PM3FBDestReadEnables_E(0xff) | PM3FBDestReadEnables_R(0xff) | PM3FBDestReadEnables_ReferenceAlpha(0xff)); PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0); PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0); PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0, PM3FBDestReadBufferWidth_Width(width)); PM3_WRITE_REG(par, PM3FBDestReadMode, PM3FBDestReadMode_ReadEnable | PM3FBDestReadMode_Enable0); PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0); PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0); PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth, PM3FBSourceReadBufferWidth_Width(width)); PM3_WRITE_REG(par, PM3FBSourceReadMode, PM3FBSourceReadMode_Blocking | PM3FBSourceReadMode_ReadEnable); PM3_WAIT(par, 2); { /* invert bits in bitmask */ unsigned long rm = 1 | (3 << 7); switch (info->var.bits_per_pixel) { case 8: PM3_WRITE_REG(par, PM3PixelSize, 
PM3PixelSize_GLOBAL_8BIT); #ifdef __BIG_ENDIAN rm |= 3 << 15; #endif break; case 16: PM3_WRITE_REG(par, PM3PixelSize, PM3PixelSize_GLOBAL_16BIT); #ifdef __BIG_ENDIAN rm |= 2 << 15; #endif break; case 32: PM3_WRITE_REG(par, PM3PixelSize, PM3PixelSize_GLOBAL_32BIT); break; default: DPRINTK(1, "Unsupported depth %d\n", info->var.bits_per_pixel); break; } PM3_WRITE_REG(par, PM3RasterizerMode, rm); } PM3_WAIT(par, 20); PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff); PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff); PM3_WRITE_REG(par, PM3FBWriteMode, PM3FBWriteMode_WriteEnable | PM3FBWriteMode_OpaqueSpan | PM3FBWriteMode_Enable0); PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0); PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0); PM3_WRITE_REG(par, PM3FBWriteBufferWidth0, PM3FBWriteBufferWidth_Width(width)); PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0); { /* size in lines of FB */ unsigned long sofb = info->screen_size / info->fix.line_length; if (sofb > 4095) PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095); else PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb); switch (info->var.bits_per_pixel) { case 8: PM3_WRITE_REG(par, PM3DitherMode, (1 << 10) | (2 << 3)); break; case 16: PM3_WRITE_REG(par, PM3DitherMode, (1 << 10) | (1 << 3)); break; case 32: PM3_WRITE_REG(par, PM3DitherMode, (1 << 10) | (0 << 3)); break; default: DPRINTK(1, "Unsupported depth %d\n", info->current_par->depth); break; } } PM3_WRITE_REG(par, PM3dXDom, 0x0); PM3_WRITE_REG(par, PM3dXSub, 0x0); PM3_WRITE_REG(par, PM3dY, 1 << 16); PM3_WRITE_REG(par, PM3StartXDom, 0x0); PM3_WRITE_REG(par, PM3StartXSub, 0x0); PM3_WRITE_REG(par, PM3StartY, 0x0); PM3_WRITE_REG(par, PM3Count, 0x0); /* Disable LocalBuffer. 
better safe than sorry */ PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0); PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0); PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0); PM3_WRITE_REG(par, PM3LBWriteMode, 0x0); pm3fb_sync(info); } static void pm3fb_fillrect(struct fb_info *info, const struct fb_fillrect *region) { struct pm3_par *par = info->par; struct fb_fillrect modded; int vxres, vyres; int rop; u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ? ((u32 *)info->pseudo_palette)[region->color] : region->color; if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_fillrect(info, region); return; } if (region->rop == ROP_COPY ) rop = PM3Config2D_ForegroundROP(0x3); /* GXcopy */ else rop = PM3Config2D_ForegroundROP(0x6) | /* GXxor */ PM3Config2D_FBDestReadEnable; vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; memcpy(&modded, region, sizeof(struct fb_fillrect)); if (!modded.width || !modded.height || modded.dx >= vxres || modded.dy >= vyres) return; if (modded.dx + modded.width > vxres) modded.width = vxres - modded.dx; if (modded.dy + modded.height > vyres) modded.height = vyres - modded.dy; if (info->var.bits_per_pixel == 8) color |= color << 8; if (info->var.bits_per_pixel <= 16) color |= color << 16; PM3_WAIT(par, 4); /* ROP Ox3 is GXcopy */ PM3_WRITE_REG(par, PM3Config2D, PM3Config2D_UseConstantSource | PM3Config2D_ForegroundROPEnable | rop | PM3Config2D_FBWriteEnable); PM3_WRITE_REG(par, PM3ForegroundColor, color); PM3_WRITE_REG(par, PM3RectanglePosition, PM3RectanglePosition_XOffset(modded.dx) | PM3RectanglePosition_YOffset(modded.dy)); PM3_WRITE_REG(par, PM3Render2D, PM3Render2D_XPositive | PM3Render2D_YPositive | PM3Render2D_Operation_Normal | PM3Render2D_SpanOperation | PM3Render2D_Width(modded.width) | PM3Render2D_Height(modded.height)); } static void pm3fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct pm3_par *par = info->par; struct fb_copyarea modded; u32 vxres, vyres; 
int x_align, o_x, o_y; if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_copyarea(info, area); return; } memcpy(&modded, area, sizeof(struct fb_copyarea)); vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; if (!modded.width || !modded.height || modded.sx >= vxres || modded.sy >= vyres || modded.dx >= vxres || modded.dy >= vyres) return; if (modded.sx + modded.width > vxres) modded.width = vxres - modded.sx; if (modded.dx + modded.width > vxres) modded.width = vxres - modded.dx; if (modded.sy + modded.height > vyres) modded.height = vyres - modded.sy; if (modded.dy + modded.height > vyres) modded.height = vyres - modded.dy; o_x = modded.sx - modded.dx; /*(sx > dx ) ? (sx - dx) : (dx - sx); */ o_y = modded.sy - modded.dy; /*(sy > dy ) ? (sy - dy) : (dy - sy); */ x_align = (modded.sx & 0x1f); PM3_WAIT(par, 6); PM3_WRITE_REG(par, PM3Config2D, PM3Config2D_UserScissorEnable | PM3Config2D_ForegroundROPEnable | PM3Config2D_Blocking | PM3Config2D_ForegroundROP(0x3) | /* Ox3 is GXcopy */ PM3Config2D_FBWriteEnable); PM3_WRITE_REG(par, PM3ScissorMinXY, ((modded.dy & 0x0fff) << 16) | (modded.dx & 0x0fff)); PM3_WRITE_REG(par, PM3ScissorMaxXY, (((modded.dy + modded.height) & 0x0fff) << 16) | ((modded.dx + modded.width) & 0x0fff)); PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, PM3FBSourceReadBufferOffset_XOffset(o_x) | PM3FBSourceReadBufferOffset_YOffset(o_y)); PM3_WRITE_REG(par, PM3RectanglePosition, PM3RectanglePosition_XOffset(modded.dx - x_align) | PM3RectanglePosition_YOffset(modded.dy)); PM3_WRITE_REG(par, PM3Render2D, ((modded.sx > modded.dx) ? PM3Render2D_XPositive : 0) | ((modded.sy > modded.dy) ? 
PM3Render2D_YPositive : 0) | PM3Render2D_Operation_Normal | PM3Render2D_SpanOperation | PM3Render2D_FBSourceReadEnable | PM3Render2D_Width(modded.width + x_align) | PM3Render2D_Height(modded.height)); } static void pm3fb_imageblit(struct fb_info *info, const struct fb_image *image) { struct pm3_par *par = info->par; u32 height = image->height; u32 fgx, bgx; const u32 *src = (const u32 *)image->data; if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_imageblit(info, image); return; } switch (info->fix.visual) { case FB_VISUAL_PSEUDOCOLOR: fgx = image->fg_color; bgx = image->bg_color; break; case FB_VISUAL_TRUECOLOR: default: fgx = par->palette[image->fg_color]; bgx = par->palette[image->bg_color]; break; } if (image->depth != 1) { cfb_imageblit(info, image); return; } if (info->var.bits_per_pixel == 8) { fgx |= fgx << 8; bgx |= bgx << 8; } if (info->var.bits_per_pixel <= 16) { fgx |= fgx << 16; bgx |= bgx << 16; } PM3_WAIT(par, 7); PM3_WRITE_REG(par, PM3ForegroundColor, fgx); PM3_WRITE_REG(par, PM3BackgroundColor, bgx); /* ROP Ox3 is GXcopy */ PM3_WRITE_REG(par, PM3Config2D, PM3Config2D_UserScissorEnable | PM3Config2D_UseConstantSource | PM3Config2D_ForegroundROPEnable | PM3Config2D_ForegroundROP(0x3) | PM3Config2D_OpaqueSpan | PM3Config2D_FBWriteEnable); PM3_WRITE_REG(par, PM3ScissorMinXY, ((image->dy & 0x0fff) << 16) | (image->dx & 0x0fff)); PM3_WRITE_REG(par, PM3ScissorMaxXY, (((image->dy + image->height) & 0x0fff) << 16) | ((image->dx + image->width) & 0x0fff)); PM3_WRITE_REG(par, PM3RectanglePosition, PM3RectanglePosition_XOffset(image->dx) | PM3RectanglePosition_YOffset(image->dy)); PM3_WRITE_REG(par, PM3Render2D, PM3Render2D_XPositive | PM3Render2D_YPositive | PM3Render2D_Operation_SyncOnBitMask | PM3Render2D_SpanOperation | PM3Render2D_Width(image->width) | PM3Render2D_Height(image->height)); while (height--) { int width = ((image->width + 7) >> 3) + info->pixmap.scan_align - 1; width >>= 2; while (width >= 
PM3_FIFO_SIZE) { int i = PM3_FIFO_SIZE - 1; PM3_WAIT(par, PM3_FIFO_SIZE); while (i--) { PM3_WRITE_REG(par, PM3BitMaskPattern, *src); src++; } width -= PM3_FIFO_SIZE - 1; } PM3_WAIT(par, width + 1); while (width--) { PM3_WRITE_REG(par, PM3BitMaskPattern, *src); src++; } } } /* end of acceleration functions */ /* * Hardware Cursor support. */ static const u8 cursor_bits_lookup[16] = { 0x00, 0x40, 0x10, 0x50, 0x04, 0x44, 0x14, 0x54, 0x01, 0x41, 0x11, 0x51, 0x05, 0x45, 0x15, 0x55 }; static int pm3fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct pm3_par *par = info->par; u8 mode; if (!hwcursor) return -EINVAL; /* just to force soft_cursor() call */ /* Too large of a cursor or wrong bpp :-( */ if (cursor->image.width > 64 || cursor->image.height > 64 || cursor->image.depth > 1) return -EINVAL; mode = PM3RD_CursorMode_TYPE_X; if (cursor->enable) mode |= PM3RD_CursorMode_CURSOR_ENABLE; PM3_WRITE_DAC_REG(par, PM3RD_CursorMode, mode); /* * If the cursor is not be changed this means either we want the * current cursor state (if enable is set) or we want to query what * we can do with the cursor (if enable is not set) */ if (!cursor->set) return 0; if (cursor->set & FB_CUR_SETPOS) { int x = cursor->image.dx - info->var.xoffset; int y = cursor->image.dy - info->var.yoffset; PM3_WRITE_DAC_REG(par, PM3RD_CursorXLow, x & 0xff); PM3_WRITE_DAC_REG(par, PM3RD_CursorXHigh, (x >> 8) & 0xf); PM3_WRITE_DAC_REG(par, PM3RD_CursorYLow, y & 0xff); PM3_WRITE_DAC_REG(par, PM3RD_CursorYHigh, (y >> 8) & 0xf); } if (cursor->set & FB_CUR_SETHOT) { PM3_WRITE_DAC_REG(par, PM3RD_CursorHotSpotX, cursor->hot.x & 0x3f); PM3_WRITE_DAC_REG(par, PM3RD_CursorHotSpotY, cursor->hot.y & 0x3f); } if (cursor->set & FB_CUR_SETCMAP) { u32 fg_idx = cursor->image.fg_color; u32 bg_idx = cursor->image.bg_color; struct fb_cmap cmap = info->cmap; /* the X11 driver says one should use these color registers */ PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(39), cmap.red[fg_idx] >> 8 ); PM3_WRITE_DAC_REG(par, 
PM3RD_CursorPalette(40), cmap.green[fg_idx] >> 8 ); PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(41), cmap.blue[fg_idx] >> 8 ); PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(42), cmap.red[bg_idx] >> 8 ); PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(43), cmap.green[bg_idx] >> 8 ); PM3_WRITE_DAC_REG(par, PM3RD_CursorPalette(44), cmap.blue[bg_idx] >> 8 ); } if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) { u8 *bitmap = (u8 *)cursor->image.data; u8 *mask = (u8 *)cursor->mask; int i; int pos = PM3RD_CursorPattern(0); for (i = 0; i < cursor->image.height; i++) { int j = (cursor->image.width + 7) >> 3; int k = 8 - j; for (; j > 0; j--) { u8 data = *bitmap ^ *mask; if (cursor->rop == ROP_COPY) data = *mask & *bitmap; /* Upper 4 bits of bitmap data */ PM3_WRITE_DAC_REG(par, pos++, cursor_bits_lookup[data >> 4] | (cursor_bits_lookup[*mask >> 4] << 1)); /* Lower 4 bits of bitmap */ PM3_WRITE_DAC_REG(par, pos++, cursor_bits_lookup[data & 0xf] | (cursor_bits_lookup[*mask & 0xf] << 1)); bitmap++; mask++; } for (; k > 0; k--) { PM3_WRITE_DAC_REG(par, pos++, 0); PM3_WRITE_DAC_REG(par, pos++, 0); } } while (pos < PM3RD_CursorPattern(1024)) PM3_WRITE_DAC_REG(par, pos++, 0); } return 0; } /* write the mode to registers */ static void pm3fb_write_mode(struct fb_info *info) { struct pm3_par *par = info->par; char tempsync = 0x00; char tempmisc = 0x00; const u32 hsstart = info->var.right_margin; const u32 hsend = hsstart + info->var.hsync_len; const u32 hbend = hsend + info->var.left_margin; const u32 xres = (info->var.xres + 31) & ~31; const u32 htotal = xres + hbend; const u32 vsstart = info->var.lower_margin; const u32 vsend = vsstart + info->var.vsync_len; const u32 vbend = vsend + info->var.upper_margin; const u32 vtotal = info->var.yres + vbend; const u32 width = (info->var.xres_virtual + 7) & ~7; const unsigned bpp = info->var.bits_per_pixel; PM3_WAIT(par, 20); PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xffffffff); PM3_WRITE_REG(par, PM3Aperture0, 0x00000000); PM3_WRITE_REG(par, 
PM3Aperture1, 0x00000000); PM3_WRITE_REG(par, PM3FIFODis, 0x00000007); PM3_WRITE_REG(par, PM3HTotal, pm3fb_shift_bpp(bpp, htotal - 1)); PM3_WRITE_REG(par, PM3HsEnd, pm3fb_shift_bpp(bpp, hsend)); PM3_WRITE_REG(par, PM3HsStart, pm3fb_shift_bpp(bpp, hsstart)); PM3_WRITE_REG(par, PM3HbEnd, pm3fb_shift_bpp(bpp, hbend)); PM3_WRITE_REG(par, PM3HgEnd, pm3fb_shift_bpp(bpp, hbend)); PM3_WRITE_REG(par, PM3ScreenStride, pm3fb_shift_bpp(bpp, width)); PM3_WRITE_REG(par, PM3VTotal, vtotal - 1); PM3_WRITE_REG(par, PM3VsEnd, vsend - 1); PM3_WRITE_REG(par, PM3VsStart, vsstart - 1); PM3_WRITE_REG(par, PM3VbEnd, vbend); switch (bpp) { case 8: PM3_WRITE_REG(par, PM3ByAperture1Mode, PM3ByApertureMode_PIXELSIZE_8BIT); PM3_WRITE_REG(par, PM3ByAperture2Mode, PM3ByApertureMode_PIXELSIZE_8BIT); break; case 16: #ifndef __BIG_ENDIAN PM3_WRITE_REG(par, PM3ByAperture1Mode, PM3ByApertureMode_PIXELSIZE_16BIT); PM3_WRITE_REG(par, PM3ByAperture2Mode, PM3ByApertureMode_PIXELSIZE_16BIT); #else PM3_WRITE_REG(par, PM3ByAperture1Mode, PM3ByApertureMode_PIXELSIZE_16BIT | PM3ByApertureMode_BYTESWAP_BADC); PM3_WRITE_REG(par, PM3ByAperture2Mode, PM3ByApertureMode_PIXELSIZE_16BIT | PM3ByApertureMode_BYTESWAP_BADC); #endif /* ! __BIG_ENDIAN */ break; case 32: #ifndef __BIG_ENDIAN PM3_WRITE_REG(par, PM3ByAperture1Mode, PM3ByApertureMode_PIXELSIZE_32BIT); PM3_WRITE_REG(par, PM3ByAperture2Mode, PM3ByApertureMode_PIXELSIZE_32BIT); #else PM3_WRITE_REG(par, PM3ByAperture1Mode, PM3ByApertureMode_PIXELSIZE_32BIT | PM3ByApertureMode_BYTESWAP_DCBA); PM3_WRITE_REG(par, PM3ByAperture2Mode, PM3ByApertureMode_PIXELSIZE_32BIT | PM3ByApertureMode_BYTESWAP_DCBA); #endif /* ! __BIG_ENDIAN */ break; default: DPRINTK("Unsupported depth %d\n", bpp); break; } /* * Oxygen VX1 - it appears that setting PM3VideoControl and * then PM3RD_SyncControl to the same SYNC settings undoes * any net change - they seem to xor together. Only set the * sync options in PM3RD_SyncControl. 
--rmk */ { unsigned int video = par->video; video &= ~(PM3VideoControl_HSYNC_MASK | PM3VideoControl_VSYNC_MASK); video |= PM3VideoControl_HSYNC_ACTIVE_HIGH | PM3VideoControl_VSYNC_ACTIVE_HIGH; PM3_WRITE_REG(par, PM3VideoControl, video); } PM3_WRITE_REG(par, PM3VClkCtl, (PM3_READ_REG(par, PM3VClkCtl) & 0xFFFFFFFC)); PM3_WRITE_REG(par, PM3ScreenBase, par->base); PM3_WRITE_REG(par, PM3ChipConfig, (PM3_READ_REG(par, PM3ChipConfig) & 0xFFFFFFFD)); wmb(); { unsigned char uninitialized_var(m); /* ClkPreScale */ unsigned char uninitialized_var(n); /* ClkFeedBackScale */ unsigned char uninitialized_var(p); /* ClkPostScale */ unsigned long pixclock = PICOS2KHZ(info->var.pixclock); (void)pm3fb_calculate_clock(pixclock, &m, &n, &p); DPRINTK("Pixclock: %ld, Pre: %d, Feedback: %d, Post: %d\n", pixclock, (int) m, (int) n, (int) p); PM3_WRITE_DAC_REG(par, PM3RD_DClk0PreScale, m); PM3_WRITE_DAC_REG(par, PM3RD_DClk0FeedbackScale, n); PM3_WRITE_DAC_REG(par, PM3RD_DClk0PostScale, p); } /* PM3_WRITE_DAC_REG(par, PM3RD_IndexControl, 0x00); */ /* PM3_SLOW_WRITE_REG(par, PM3RD_IndexControl, 0x00); */ if ((par->video & PM3VideoControl_HSYNC_MASK) == PM3VideoControl_HSYNC_ACTIVE_HIGH) tempsync |= PM3RD_SyncControl_HSYNC_ACTIVE_HIGH; if ((par->video & PM3VideoControl_VSYNC_MASK) == PM3VideoControl_VSYNC_ACTIVE_HIGH) tempsync |= PM3RD_SyncControl_VSYNC_ACTIVE_HIGH; PM3_WRITE_DAC_REG(par, PM3RD_SyncControl, tempsync); DPRINTK("PM3RD_SyncControl: %d\n", tempsync); PM3_WRITE_DAC_REG(par, PM3RD_DACControl, 0x00); switch (pm3fb_depth(&info->var)) { case 8: PM3_WRITE_DAC_REG(par, PM3RD_PixelSize, PM3RD_PixelSize_8_BIT_PIXELS); PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat, PM3RD_ColorFormat_CI8_COLOR | PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW); tempmisc |= PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE; break; case 12: PM3_WRITE_DAC_REG(par, PM3RD_PixelSize, PM3RD_PixelSize_16_BIT_PIXELS); PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat, PM3RD_ColorFormat_4444_COLOR | PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW | 
PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE); tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE | PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE; break; case 15: PM3_WRITE_DAC_REG(par, PM3RD_PixelSize, PM3RD_PixelSize_16_BIT_PIXELS); PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat, PM3RD_ColorFormat_5551_FRONT_COLOR | PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW | PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE); tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE | PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE; break; case 16: PM3_WRITE_DAC_REG(par, PM3RD_PixelSize, PM3RD_PixelSize_16_BIT_PIXELS); PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat, PM3RD_ColorFormat_565_FRONT_COLOR | PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW | PM3RD_ColorFormat_LINEAR_COLOR_EXT_ENABLE); tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE | PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE; break; case 32: PM3_WRITE_DAC_REG(par, PM3RD_PixelSize, PM3RD_PixelSize_32_BIT_PIXELS); PM3_WRITE_DAC_REG(par, PM3RD_ColorFormat, PM3RD_ColorFormat_8888_COLOR | PM3RD_ColorFormat_COLOR_ORDER_BLUE_LOW); tempmisc |= PM3RD_MiscControl_DIRECTCOLOR_ENABLE | PM3RD_MiscControl_HIGHCOLOR_RES_ENABLE; break; } PM3_WRITE_DAC_REG(par, PM3RD_MiscControl, tempmisc); } /* * hardware independent functions */ static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u32 lpitch; unsigned bpp = var->red.length + var->green.length + var->blue.length + var->transp.length; if (bpp != var->bits_per_pixel) { /* set predefined mode for bits_per_pixel settings */ switch (var->bits_per_pixel) { case 8: var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->transp.offset = 0; var->transp.length = 0; break; case 16: var->red.length = 5; var->blue.length = 5; var->green.length = 6; var->transp.length = 0; break; case 32: var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 8; break; default: DPRINTK("depth not supported: %u\n", var->bits_per_pixel); 
return -EINVAL; } } /* it is assumed BGRA order */ if (var->bits_per_pixel > 8 ) { var->blue.offset = 0; var->green.offset = var->blue.length; var->red.offset = var->green.offset + var->green.length; var->transp.offset = var->red.offset + var->red.length; } var->height = -1; var->width = -1; if (var->xres != var->xres_virtual) { DPRINTK("virtual x resolution != " "physical x resolution not supported\n"); return -EINVAL; } if (var->yres > var->yres_virtual) { DPRINTK("virtual y resolution < " "physical y resolution not possible\n"); return -EINVAL; } if (var->xoffset) { DPRINTK("xoffset not supported\n"); return -EINVAL; } if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { DPRINTK("interlace not supported\n"); return -EINVAL; } var->xres = (var->xres + 31) & ~31; /* could sometimes be 8 */ lpitch = var->xres * ((var->bits_per_pixel + 7) >> 3); if (var->xres < 200 || var->xres > 2048) { DPRINTK("width not supported: %u\n", var->xres); return -EINVAL; } if (var->yres < 200 || var->yres > 4095) { DPRINTK("height not supported: %u\n", var->yres); return -EINVAL; } if (lpitch * var->yres_virtual > info->fix.smem_len) { DPRINTK("no memory for screen (%ux%ux%u)\n", var->xres, var->yres_virtual, var->bits_per_pixel); return -EINVAL; } if (PICOS2KHZ(var->pixclock) > PM3_MAX_PIXCLOCK) { DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock)); return -EINVAL; } var->accel_flags = 0; /* Can't mmap if this is on */ DPRINTK("Checking graphics mode at %dx%d depth %d\n", var->xres, var->yres, var->bits_per_pixel); return 0; } static int pm3fb_set_par(struct fb_info *info) { struct pm3_par *par = info->par; const u32 xres = (info->var.xres + 31) & ~31; const unsigned bpp = info->var.bits_per_pixel; par->base = pm3fb_shift_bpp(bpp, (info->var.yoffset * xres) + info->var.xoffset); par->video = 0; if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) par->video |= PM3VideoControl_HSYNC_ACTIVE_HIGH; else par->video |= PM3VideoControl_HSYNC_ACTIVE_LOW; if (info->var.sync & 
FB_SYNC_VERT_HIGH_ACT) par->video |= PM3VideoControl_VSYNC_ACTIVE_HIGH; else par->video |= PM3VideoControl_VSYNC_ACTIVE_LOW; if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) par->video |= PM3VideoControl_LINE_DOUBLE_ON; if ((info->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) par->video |= PM3VideoControl_ENABLE; else DPRINTK("PM3Video disabled\n"); switch (bpp) { case 8: par->video |= PM3VideoControl_PIXELSIZE_8BIT; break; case 16: par->video |= PM3VideoControl_PIXELSIZE_16BIT; break; case 32: par->video |= PM3VideoControl_PIXELSIZE_32BIT; break; default: DPRINTK("Unsupported depth\n"); break; } info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = ((info->var.xres_virtual + 7) >> 3) * bpp; /* pm3fb_clear_memory(info, 0);*/ pm3fb_clear_colormap(par, 0, 0, 0); PM3_WRITE_DAC_REG(par, PM3RD_CursorMode, 0); pm3fb_init_engine(info); pm3fb_write_mode(info); return 0; } static int pm3fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct pm3_par *par = info->par; if (regno >= 256) /* no. 
of hw registers */ return -EINVAL; /* grayscale works only partially under directcolor */ /* grayscale = 0.30*R + 0.59*G + 0.11*B */ if (info->var.grayscale) red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; /* Directcolor: * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * {hardwarespecific} contains width of DAC * pseudo_palette[X] is programmed to (X << red.offset) | * (X << green.offset) | * (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) * color depth = SUM(var->{color}.length) * * Pseudocolor: * var->{color}.offset is 0 * var->{color}.length contains width of DAC or the number * of unique colors available (color depth) * pseudo_palette is not used * RAMDAC[X] is programmed to (red, green, blue) * color depth = var->{color}.length */ /* * This is the point where the color is converted to something that * is acceptable by the hardware. */ #define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16) red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); transp = CNVT_TOHW(transp, info->var.transp.length); #undef CNVT_TOHW if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { u32 v; if (regno >= 16) return -EINVAL; v = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); switch (info->var.bits_per_pixel) { case 8: break; case 16: case 32: ((u32 *)(info->pseudo_palette))[regno] = v; break; } return 0; } else if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) pm3fb_set_color(par, regno, red, green, blue); return 0; } static int pm3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct pm3_par *par = info->par; const u32 xres = (info->var.xres + 31) & ~31; par->base = pm3fb_shift_bpp(info->var.bits_per_pixel, (var->yoffset * xres) + 
var->xoffset); PM3_WAIT(par, 1); PM3_WRITE_REG(par, PM3ScreenBase, par->base); return 0; } static int pm3fb_blank(int blank_mode, struct fb_info *info) { struct pm3_par *par = info->par; u32 video = par->video; /* * Oxygen VX1 - it appears that setting PM3VideoControl and * then PM3RD_SyncControl to the same SYNC settings undoes * any net change - they seem to xor together. Only set the * sync options in PM3RD_SyncControl. --rmk */ video &= ~(PM3VideoControl_HSYNC_MASK | PM3VideoControl_VSYNC_MASK); video |= PM3VideoControl_HSYNC_ACTIVE_HIGH | PM3VideoControl_VSYNC_ACTIVE_HIGH; switch (blank_mode) { case FB_BLANK_UNBLANK: video |= PM3VideoControl_ENABLE; break; case FB_BLANK_NORMAL: video &= ~PM3VideoControl_ENABLE; break; case FB_BLANK_HSYNC_SUSPEND: video &= ~(PM3VideoControl_HSYNC_MASK | PM3VideoControl_BLANK_ACTIVE_LOW); break; case FB_BLANK_VSYNC_SUSPEND: video &= ~(PM3VideoControl_VSYNC_MASK | PM3VideoControl_BLANK_ACTIVE_LOW); break; case FB_BLANK_POWERDOWN: video &= ~(PM3VideoControl_HSYNC_MASK | PM3VideoControl_VSYNC_MASK | PM3VideoControl_BLANK_ACTIVE_LOW); break; default: DPRINTK("Unsupported blanking %d\n", blank_mode); return 1; } PM3_WAIT(par, 1); PM3_WRITE_REG(par, PM3VideoControl, video); return 0; } /* * Frame buffer operations */ static struct fb_ops pm3fb_ops = { .owner = THIS_MODULE, .fb_check_var = pm3fb_check_var, .fb_set_par = pm3fb_set_par, .fb_setcolreg = pm3fb_setcolreg, .fb_pan_display = pm3fb_pan_display, .fb_fillrect = pm3fb_fillrect, .fb_copyarea = pm3fb_copyarea, .fb_imageblit = pm3fb_imageblit, .fb_blank = pm3fb_blank, .fb_sync = pm3fb_sync, .fb_cursor = pm3fb_cursor, }; /* ------------------------------------------------------------------------- */ /* * Initialization */ /* mmio register are already mapped when this function is called */ /* the pm3fb_fix.smem_start is also set */ static unsigned long pm3fb_size_memory(struct pm3_par *par) { unsigned long memsize = 0; unsigned long tempBypass, i, temp1, temp2; unsigned char __iomem 
*screen_mem; pm3fb_fix.smem_len = 64 * 1024l * 1024; /* request full aperture size */ /* Linear frame buffer - request region and map it. */ if (!request_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len, "pm3fb smem")) { printk(KERN_WARNING "pm3fb: Can't reserve smem.\n"); return 0; } screen_mem = ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len); if (!screen_mem) { printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n"); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); return 0; } /* TODO: card-specific stuff, *before* accessing *any* FB memory */ /* For Appian Jeronimo 2000 board second head */ tempBypass = PM3_READ_REG(par, PM3MemBypassWriteMask); DPRINTK("PM3MemBypassWriteMask was: 0x%08lx\n", tempBypass); PM3_WAIT(par, 1); PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xFFFFFFFF); /* pm3 split up memory, replicates, and do a lot of * nasty stuff IMHO ;-) */ for (i = 0; i < 32; i++) { fb_writel(i * 0x00345678, (screen_mem + (i * 1048576))); mb(); temp1 = fb_readl((screen_mem + (i * 1048576))); /* Let's check for wrapover, write will fail at 16MB boundary */ if (temp1 == (i * 0x00345678)) memsize = i; else break; } DPRINTK("First detect pass already got %ld MB\n", memsize + 1); if (memsize + 1 == i) { for (i = 0; i < 32; i++) { /* Clear first 32MB ; 0 is 0, no need to byteswap */ writel(0x0000000, (screen_mem + (i * 1048576))); } wmb(); for (i = 32; i < 64; i++) { fb_writel(i * 0x00345678, (screen_mem + (i * 1048576))); mb(); temp1 = fb_readl((screen_mem + (i * 1048576))); temp2 = fb_readl((screen_mem + ((i - 32) * 1048576))); /* different value, different RAM... 
*/ if ((temp1 == (i * 0x00345678)) && (temp2 == 0)) memsize = i; else break; } } DPRINTK("Second detect pass got %ld MB\n", memsize + 1); PM3_WAIT(par, 1); PM3_WRITE_REG(par, PM3MemBypassWriteMask, tempBypass); iounmap(screen_mem); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); memsize = 1048576 * (memsize + 1); DPRINTK("Returning 0x%08lx bytes\n", memsize); return memsize; } static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fb_info *info; struct pm3_par *par; struct device *device = &dev->dev; /* for pci drivers */ int err; int retval = -ENXIO; err = pci_enable_device(dev); if (err) { printk(KERN_WARNING "pm3fb: Can't enable PCI dev: %d\n", err); return err; } /* * Dynamically allocate info and par */ info = framebuffer_alloc(sizeof(struct pm3_par), device); if (!info) return -ENOMEM; par = info->par; /* * Here we set the screen_base to the virtual memory address * for the framebuffer. */ pm3fb_fix.mmio_start = pci_resource_start(dev, 0); pm3fb_fix.mmio_len = PM3_REGS_SIZE; #if defined(__BIG_ENDIAN) pm3fb_fix.mmio_start += PM3_REGS_SIZE; DPRINTK("Adjusting register base for big-endian.\n"); #endif /* Registers - request region and map it. */ if (!request_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len, "pm3fb regbase")) { printk(KERN_WARNING "pm3fb: Can't reserve regbase.\n"); goto err_exit_neither; } par->v_regs = ioremap_nocache(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); if (!par->v_regs) { printk(KERN_WARNING "pm3fb: Can't remap %s register area.\n", pm3fb_fix.id); release_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); goto err_exit_neither; } /* Linear frame buffer - request region and map it. 
*/ pm3fb_fix.smem_start = pci_resource_start(dev, 1); pm3fb_fix.smem_len = pm3fb_size_memory(par); if (!pm3fb_fix.smem_len) { printk(KERN_WARNING "pm3fb: Can't find memory on board.\n"); goto err_exit_mmio; } if (!request_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len, "pm3fb smem")) { printk(KERN_WARNING "pm3fb: Can't reserve smem.\n"); goto err_exit_mmio; } info->screen_base = ioremap_nocache(pm3fb_fix.smem_start, pm3fb_fix.smem_len); if (!info->screen_base) { printk(KERN_WARNING "pm3fb: Can't ioremap smem area.\n"); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); goto err_exit_mmio; } info->screen_size = pm3fb_fix.smem_len; #ifdef CONFIG_MTRR if (!nomtrr) par->mtrr_handle = mtrr_add(pm3fb_fix.smem_start, pm3fb_fix.smem_len, MTRR_TYPE_WRCOMB, 1); #endif info->fbops = &pm3fb_ops; par->video = PM3_READ_REG(par, PM3VideoControl); info->fix = pm3fb_fix; info->pseudo_palette = par->palette; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT; if (noaccel) { printk(KERN_DEBUG "disabling acceleration\n"); info->flags |= FBINFO_HWACCEL_DISABLED; } info->pixmap.addr = kmalloc(PM3_PIXMAP_SIZE, GFP_KERNEL); if (!info->pixmap.addr) { retval = -ENOMEM; goto err_exit_pixmap; } info->pixmap.size = PM3_PIXMAP_SIZE; info->pixmap.buf_align = 4; info->pixmap.scan_align = 4; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; /* * This should give a reasonable default video mode. The following is * done when we can set a video mode. */ if (!mode_option) mode_option = "640x480@60"; retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8); if (!retval || retval == 4) { retval = -EINVAL; goto err_exit_both; } if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { retval = -ENOMEM; goto err_exit_both; } /* * For drivers that can... 
*/ pm3fb_check_var(&info->var, info); if (register_framebuffer(info) < 0) { retval = -EINVAL; goto err_exit_all; } fb_info(info, "%s frame buffer device\n", info->fix.id); pci_set_drvdata(dev, info); return 0; err_exit_all: fb_dealloc_cmap(&info->cmap); err_exit_both: kfree(info->pixmap.addr); err_exit_pixmap: iounmap(info->screen_base); release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len); err_exit_mmio: iounmap(par->v_regs); release_mem_region(pm3fb_fix.mmio_start, pm3fb_fix.mmio_len); err_exit_neither: framebuffer_release(info); return retval; } /* * Cleanup */ static void pm3fb_remove(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); if (info) { struct fb_fix_screeninfo *fix = &info->fix; struct pm3_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); #ifdef CONFIG_MTRR if (par->mtrr_handle >= 0) mtrr_del(par->mtrr_handle, info->fix.smem_start, info->fix.smem_len); #endif /* CONFIG_MTRR */ iounmap(info->screen_base); release_mem_region(fix->smem_start, fix->smem_len); iounmap(par->v_regs); release_mem_region(fix->mmio_start, fix->mmio_len); kfree(info->pixmap.addr); framebuffer_release(info); } } static struct pci_device_id pm3fb_id_table[] = { { PCI_VENDOR_ID_3DLABS, 0x0a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; /* For PCI drivers */ static struct pci_driver pm3fb_driver = { .name = "pm3fb", .id_table = pm3fb_id_table, .probe = pm3fb_probe, .remove = pm3fb_remove, }; MODULE_DEVICE_TABLE(pci, pm3fb_id_table); #ifndef MODULE /* * Setup */ /* * Only necessary if your driver takes special options, * otherwise we fall back on the generic fb_setup(). 
*/ static int __init pm3fb_setup(char *options) { char *this_opt; /* Parse user specified options (`video=pm3fb:') */ if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; else if (!strncmp(this_opt, "noaccel", 7)) noaccel = 1; else if (!strncmp(this_opt, "hwcursor=", 9)) hwcursor = simple_strtoul(this_opt + 9, NULL, 0); #ifdef CONFIG_MTRR else if (!strncmp(this_opt, "nomtrr", 6)) nomtrr = 1; #endif else mode_option = this_opt; } return 0; } #endif /* MODULE */ static int __init pm3fb_init(void) { /* * For kernel boot options (in 'video=pm3fb:<options>' format) */ #ifndef MODULE char *option = NULL; if (fb_get_options("pm3fb", &option)) return -ENODEV; pm3fb_setup(option); #endif return pci_register_driver(&pm3fb_driver); } #ifdef MODULE static void __exit pm3fb_exit(void) { pci_unregister_driver(&pm3fb_driver); } module_exit(pm3fb_exit); #endif module_init(pm3fb_init); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '648x480-8@60'"); module_param(noaccel, bool, 0); MODULE_PARM_DESC(noaccel, "Disable acceleration"); module_param(hwcursor, int, 0644); MODULE_PARM_DESC(hwcursor, "Enable hardware cursor " "(1=enable, 0=disable, default=1)"); #ifdef CONFIG_MTRR module_param(nomtrr, bool, 0); MODULE_PARM_DESC(nomtrr, "Disable MTRR support (0 or 1=disabled) (default=0)"); #endif MODULE_DESCRIPTION("Permedia3 framebuffer device driver"); MODULE_LICENSE("GPL");
gpl-2.0
renaudallard/gnexus_kernel
drivers/crypto/mv_cesa.c
1571
27971
/* * Support for Marvell's crypto engine which can be found on some Orion5X * boards. * * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > * License: GPLv2 * */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/crypto.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kthread.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/internal/hash.h> #include <crypto/sha.h> #include "mv_cesa.h" #define MV_CESA "MV-CESA:" #define MAX_HW_HASH_SIZE 0xFFFF /* * STM: * /---------------------------------------\ * | | request complete * \./ | * IDLE -> new request -> BUSY -> done -> DEQUEUE * /°\ | * | | more scatter entries * \________________/ */ enum engine_status { ENGINE_IDLE, ENGINE_BUSY, ENGINE_W_DEQUEUE, }; /** * struct req_progress - used for every crypt request * @src_sg_it: sg iterator for src * @dst_sg_it: sg iterator for dst * @sg_src_left: bytes left in src to process (scatter list) * @src_start: offset to add to src start position (scatter list) * @crypt_len: length of current hw crypt/hash process * @hw_nbytes: total bytes to process in hw for this request * @copy_back: whether to copy data back (crypt) or not (hash) * @sg_dst_left: bytes left dst to process in this scatter list * @dst_start: offset to add to dst start position (scatter list) * @hw_processed_bytes: number of bytes processed by hw (request). * * sg helper are used to iterate over the scatterlist. Since the size of the * SRAM may be less than the scatter size, this struct struct is used to keep * track of progress within current scatterlist. 
*/ struct req_progress { struct sg_mapping_iter src_sg_it; struct sg_mapping_iter dst_sg_it; void (*complete) (void); void (*process) (int is_first); /* src mostly */ int sg_src_left; int src_start; int crypt_len; int hw_nbytes; /* dst mostly */ int copy_back; int sg_dst_left; int dst_start; int hw_processed_bytes; }; struct crypto_priv { void __iomem *reg; void __iomem *sram; int irq; struct task_struct *queue_th; /* the lock protects queue and eng_st */ spinlock_t lock; struct crypto_queue queue; enum engine_status eng_st; struct crypto_async_request *cur_req; struct req_progress p; int max_req_size; int sram_size; int has_sha1; int has_hmac_sha1; }; static struct crypto_priv *cpg; struct mv_ctx { u8 aes_enc_key[AES_KEY_LEN]; u32 aes_dec_key[8]; int key_len; u32 need_calc_aes_dkey; }; enum crypto_op { COP_AES_ECB, COP_AES_CBC, }; struct mv_req_ctx { enum crypto_op op; int decrypt; }; enum hash_op { COP_SHA1, COP_HMAC_SHA1 }; struct mv_tfm_hash_ctx { struct crypto_shash *fallback; struct crypto_shash *base_hash; u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; int count_add; enum hash_op op; }; struct mv_req_hash_ctx { u64 count; u32 state[SHA1_DIGEST_SIZE / 4]; u8 buffer[SHA1_BLOCK_SIZE]; int first_hash; /* marks that we don't have previous state */ int last_chunk; /* marks that this is the 'final' request */ int extra_bytes; /* unprocessed bytes in buffer */ enum hash_op op; int count_add; }; static void compute_aes_dec_key(struct mv_ctx *ctx) { struct crypto_aes_ctx gen_aes_key; int key_pos; if (!ctx->need_calc_aes_dkey) return; crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); key_pos = ctx->key_len + 24; memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); switch (ctx->key_len) { case AES_KEYSIZE_256: key_pos -= 2; /* fall */ case AES_KEYSIZE_192: key_pos -= 2; memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], 4 * 4); break; } ctx->need_calc_aes_dkey = 0; } static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, 
unsigned int len) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); struct mv_ctx *ctx = crypto_tfm_ctx(tfm); switch (len) { case AES_KEYSIZE_128: case AES_KEYSIZE_192: case AES_KEYSIZE_256: break; default: crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } ctx->key_len = len; ctx->need_calc_aes_dkey = 1; memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); return 0; } static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) { int ret; void *sbuf; int copy_len; while (len) { if (!p->sg_src_left) { ret = sg_miter_next(&p->src_sg_it); BUG_ON(!ret); p->sg_src_left = p->src_sg_it.length; p->src_start = 0; } sbuf = p->src_sg_it.addr + p->src_start; copy_len = min(p->sg_src_left, len); memcpy(dbuf, sbuf, copy_len); p->src_start += copy_len; p->sg_src_left -= copy_len; len -= copy_len; dbuf += copy_len; } } static void setup_data_in(void) { struct req_progress *p = &cpg->p; int data_in_sram = min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, data_in_sram - p->crypt_len); p->crypt_len = data_in_sram; } static void mv_process_current_q(int first_block) { struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); struct sec_accel_config op; switch (req_ctx->op) { case COP_AES_ECB: op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; break; case COP_AES_CBC: default: op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); if (first_block) memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); break; } if (req_ctx->decrypt) { op.config |= CFG_DIR_DEC; memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, AES_KEY_LEN); } else { op.config |= CFG_DIR_ENC; memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, AES_KEY_LEN); } switch (ctx->key_len) 
{ case AES_KEYSIZE_128: op.config |= CFG_AES_LEN_128; break; case AES_KEYSIZE_192: op.config |= CFG_AES_LEN_192; break; case AES_KEYSIZE_256: op.config |= CFG_AES_LEN_256; break; } op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | ENC_P_DST(SRAM_DATA_OUT_START); op.enc_key_p = SRAM_DATA_KEY_P; setup_data_in(); op.enc_len = cpg->p.crypt_len; memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); /* GO */ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); /* * XXX: add timer if the interrupt does not occur for some mystery * reason */ } static void mv_crypto_algo_completion(void) { struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); sg_miter_stop(&cpg->p.src_sg_it); sg_miter_stop(&cpg->p.dst_sg_it); if (req_ctx->op != COP_AES_CBC) return ; memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); } static void mv_process_hash_current(int first_block) { struct ahash_request *req = ahash_request_cast(cpg->cur_req); const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); struct req_progress *p = &cpg->p; struct sec_accel_config op = { 0 }; int is_last; switch (req_ctx->op) { case COP_SHA1: default: op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; break; case COP_HMAC_SHA1: op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); break; } op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) req_ctx-> count); setup_data_in(); op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); is_last = req_ctx->last_chunk && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) && (req_ctx->count <= MAX_HW_HASH_SIZE); if (req_ctx->first_hash) { if (is_last) op.config |= CFG_NOT_FRAG; else op.config |= CFG_FIRST_FRAG; req_ctx->first_hash = 0; } 
else { if (is_last) op.config |= CFG_LAST_FRAG; else op.config |= CFG_MID_FRAG; if (first_block) { writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); } } memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); /* GO */ writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); /* * XXX: add timer if the interrupt does not occur for some mystery * reason */ } static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, struct shash_desc *desc) { int i; struct sha1_state shash_state; shash_state.count = ctx->count + ctx->count_add; for (i = 0; i < 5; i++) shash_state.state[i] = ctx->state[i]; memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); return crypto_shash_import(desc, &shash_state); } static int mv_hash_final_fallback(struct ahash_request *req) { const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); struct { struct shash_desc shash; char ctx[crypto_shash_descsize(tfm_ctx->fallback)]; } desc; int rc; desc.shash.tfm = tfm_ctx->fallback; desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP; if (unlikely(req_ctx->first_hash)) { crypto_shash_init(&desc.shash); crypto_shash_update(&desc.shash, req_ctx->buffer, req_ctx->extra_bytes); } else { /* only SHA1 for now.... 
*/ rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash); if (rc) goto out; } rc = crypto_shash_final(&desc.shash, req->result); out: return rc; } static void mv_hash_algo_completion(void) { struct ahash_request *req = ahash_request_cast(cpg->cur_req); struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); if (ctx->extra_bytes) copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); sg_miter_stop(&cpg->p.src_sg_it); if (likely(ctx->last_chunk)) { if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, crypto_ahash_digestsize(crypto_ahash_reqtfm (req))); } else mv_hash_final_fallback(req); } else { ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); } } static void dequeue_complete_req(void) { struct crypto_async_request *req = cpg->cur_req; void *buf; int ret; cpg->p.hw_processed_bytes += cpg->p.crypt_len; if (cpg->p.copy_back) { int need_copy_len = cpg->p.crypt_len; int sram_offset = 0; do { int dst_copy; if (!cpg->p.sg_dst_left) { ret = sg_miter_next(&cpg->p.dst_sg_it); BUG_ON(!ret); cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; cpg->p.dst_start = 0; } buf = cpg->p.dst_sg_it.addr; buf += cpg->p.dst_start; dst_copy = min(need_copy_len, cpg->p.sg_dst_left); memcpy(buf, cpg->sram + SRAM_DATA_OUT_START + sram_offset, dst_copy); sram_offset += dst_copy; cpg->p.sg_dst_left -= dst_copy; need_copy_len -= dst_copy; cpg->p.dst_start += dst_copy; } while (need_copy_len > 0); } cpg->p.crypt_len = 0; BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { /* process next scatter list entry */ cpg->eng_st = ENGINE_BUSY; cpg->p.process(0); } else { cpg->p.complete(); cpg->eng_st = ENGINE_IDLE; local_bh_disable(); req->complete(req, 0); local_bh_enable(); } } static int 
count_sgs(struct scatterlist *sl, unsigned int total_bytes) { int i = 0; size_t cur_len; while (sl) { cur_len = sl[i].length; ++i; if (total_bytes > cur_len) total_bytes -= cur_len; else break; } return i; } static void mv_start_new_crypt_req(struct ablkcipher_request *req) { struct req_progress *p = &cpg->p; int num_sgs; cpg->cur_req = &req->base; memset(p, 0, sizeof(struct req_progress)); p->hw_nbytes = req->nbytes; p->complete = mv_crypto_algo_completion; p->process = mv_process_current_q; p->copy_back = 1; num_sgs = count_sgs(req->src, req->nbytes); sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); num_sgs = count_sgs(req->dst, req->nbytes); sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); mv_process_current_q(1); } static void mv_start_new_hash_req(struct ahash_request *req) { struct req_progress *p = &cpg->p; struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); int num_sgs, hw_bytes, old_extra_bytes, rc; cpg->cur_req = &req->base; memset(p, 0, sizeof(struct req_progress)); hw_bytes = req->nbytes + ctx->extra_bytes; old_extra_bytes = ctx->extra_bytes; ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; if (ctx->extra_bytes != 0 && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) hw_bytes -= ctx->extra_bytes; else ctx->extra_bytes = 0; num_sgs = count_sgs(req->src, req->nbytes); sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); if (hw_bytes) { p->hw_nbytes = hw_bytes; p->complete = mv_hash_algo_completion; p->process = mv_process_hash_current; if (unlikely(old_extra_bytes)) { memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, old_extra_bytes); p->crypt_len = old_extra_bytes; } mv_process_hash_current(1); } else { copy_src_to_buf(p, ctx->buffer + old_extra_bytes, ctx->extra_bytes - old_extra_bytes); sg_miter_stop(&p->src_sg_it); if (ctx->last_chunk) rc = mv_hash_final_fallback(req); else rc = 0; cpg->eng_st = ENGINE_IDLE; local_bh_disable(); req->base.complete(&req->base, rc); local_bh_enable(); } } static 
int queue_manag(void *data) { cpg->eng_st = ENGINE_IDLE; do { struct crypto_async_request *async_req = NULL; struct crypto_async_request *backlog; __set_current_state(TASK_INTERRUPTIBLE); if (cpg->eng_st == ENGINE_W_DEQUEUE) dequeue_complete_req(); spin_lock_irq(&cpg->lock); if (cpg->eng_st == ENGINE_IDLE) { backlog = crypto_get_backlog(&cpg->queue); async_req = crypto_dequeue_request(&cpg->queue); if (async_req) { BUG_ON(cpg->eng_st != ENGINE_IDLE); cpg->eng_st = ENGINE_BUSY; } } spin_unlock_irq(&cpg->lock); if (backlog) { backlog->complete(backlog, -EINPROGRESS); backlog = NULL; } if (async_req) { if (async_req->tfm->__crt_alg->cra_type != &crypto_ahash_type) { struct ablkcipher_request *req = ablkcipher_request_cast(async_req); mv_start_new_crypt_req(req); } else { struct ahash_request *req = ahash_request_cast(async_req); mv_start_new_hash_req(req); } async_req = NULL; } schedule(); } while (!kthread_should_stop()); return 0; } static int mv_handle_req(struct crypto_async_request *req) { unsigned long flags; int ret; spin_lock_irqsave(&cpg->lock, flags); ret = crypto_enqueue_request(&cpg->queue, req); spin_unlock_irqrestore(&cpg->lock, flags); wake_up_process(cpg->queue_th); return ret; } static int mv_enc_aes_ecb(struct ablkcipher_request *req) { struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); req_ctx->op = COP_AES_ECB; req_ctx->decrypt = 0; return mv_handle_req(&req->base); } static int mv_dec_aes_ecb(struct ablkcipher_request *req) { struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); req_ctx->op = COP_AES_ECB; req_ctx->decrypt = 1; compute_aes_dec_key(ctx); return mv_handle_req(&req->base); } static int mv_enc_aes_cbc(struct ablkcipher_request *req) { struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); req_ctx->op = COP_AES_CBC; req_ctx->decrypt = 0; return mv_handle_req(&req->base); } static int mv_dec_aes_cbc(struct ablkcipher_request *req) { struct mv_ctx *ctx = 
crypto_tfm_ctx(req->base.tfm); struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); req_ctx->op = COP_AES_CBC; req_ctx->decrypt = 1; compute_aes_dec_key(ctx); return mv_handle_req(&req->base); } static int mv_cra_init(struct crypto_tfm *tfm) { tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); return 0; } static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, int is_last, unsigned int req_len, int count_add) { memset(ctx, 0, sizeof(*ctx)); ctx->op = op; ctx->count = req_len; ctx->first_hash = 1; ctx->last_chunk = is_last; ctx->count_add = count_add; } static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, unsigned req_len) { ctx->last_chunk = is_last; ctx->count += req_len; } static int mv_hash_init(struct ahash_request *req) { const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, tfm_ctx->count_add); return 0; } static int mv_hash_update(struct ahash_request *req) { if (!req->nbytes) return 0; mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); return mv_handle_req(&req->base); } static int mv_hash_final(struct ahash_request *req) { struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); ahash_request_set_crypt(req, NULL, req->result, 0); mv_update_hash_req_ctx(ctx, 1, 0); return mv_handle_req(&req->base); } static int mv_hash_finup(struct ahash_request *req) { mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); return mv_handle_req(&req->base); } static int mv_hash_digest(struct ahash_request *req) { const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, req->nbytes, tfm_ctx->count_add); return mv_handle_req(&req->base); } static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, const void *ostate) { const struct sha1_state *isha1_state = istate, *osha1_state = ostate; int i; for (i = 0; i < 5; i++) { ctx->ivs[i] = 
cpu_to_be32(isha1_state->state[i]); ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); } } static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, unsigned int keylen) { int rc; struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); int bs, ds, ss; if (!ctx->base_hash) return 0; rc = crypto_shash_setkey(ctx->fallback, key, keylen); if (rc) return rc; /* Can't see a way to extract the ipad/opad from the fallback tfm so I'm basically copying code from the hmac module */ bs = crypto_shash_blocksize(ctx->base_hash); ds = crypto_shash_digestsize(ctx->base_hash); ss = crypto_shash_statesize(ctx->base_hash); { struct { struct shash_desc shash; char ctx[crypto_shash_descsize(ctx->base_hash)]; } desc; unsigned int i; char ipad[ss]; char opad[ss]; desc.shash.tfm = ctx->base_hash; desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) & CRYPTO_TFM_REQ_MAY_SLEEP; if (keylen > bs) { int err; err = crypto_shash_digest(&desc.shash, key, keylen, ipad); if (err) return err; keylen = ds; } else memcpy(ipad, key, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { ipad[i] ^= 0x36; opad[i] ^= 0x5c; } rc = crypto_shash_init(&desc.shash) ? : crypto_shash_update(&desc.shash, ipad, bs) ? : crypto_shash_export(&desc.shash, ipad) ? : crypto_shash_init(&desc.shash) ? : crypto_shash_update(&desc.shash, opad, bs) ? : crypto_shash_export(&desc.shash, opad); if (rc == 0) mv_hash_init_ivs(ctx, ipad, opad); return rc; } } static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, enum hash_op op, int count_add) { const char *fallback_driver_name = tfm->__crt_alg->cra_name; struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_shash *fallback_tfm = NULL; struct crypto_shash *base_hash = NULL; int err = -ENOMEM; ctx->op = op; ctx->count_add = count_add; /* Allocate a fallback and abort if it failed. 
*/ fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { printk(KERN_WARNING MV_CESA "Fallback driver '%s' could not be loaded!\n", fallback_driver_name); err = PTR_ERR(fallback_tfm); goto out; } ctx->fallback = fallback_tfm; if (base_hash_name) { /* Allocate a hash to compute the ipad/opad of hmac. */ base_hash = crypto_alloc_shash(base_hash_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(base_hash)) { printk(KERN_WARNING MV_CESA "Base driver '%s' could not be loaded!\n", base_hash_name); err = PTR_ERR(base_hash); goto err_bad_base; } } ctx->base_hash = base_hash; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct mv_req_hash_ctx) + crypto_shash_descsize(ctx->fallback)); return 0; err_bad_base: crypto_free_shash(fallback_tfm); out: return err; } static void mv_cra_hash_exit(struct crypto_tfm *tfm) { struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_shash(ctx->fallback); if (ctx->base_hash) crypto_free_shash(ctx->base_hash); } static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) { return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); } static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) { return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); } irqreturn_t crypto_int(int irq, void *priv) { u32 val; val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); if (!(val & SEC_INT_ACCEL0_DONE)) return IRQ_NONE; val &= ~SEC_INT_ACCEL0_DONE; writel(val, cpg->reg + FPGA_INT_STATUS); writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); BUG_ON(cpg->eng_st != ENGINE_BUSY); cpg->eng_st = ENGINE_W_DEQUEUE; wake_up_process(cpg->queue_th); return IRQ_HANDLED; } struct crypto_alg mv_aes_alg_ecb = { .cra_name = "ecb(aes)", .cra_driver_name = "mv-ecb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = 16, .cra_ctxsize = sizeof(struct mv_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = 
mv_cra_init, .cra_u = { .ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = mv_setkey_aes, .encrypt = mv_enc_aes_ecb, .decrypt = mv_dec_aes_ecb, }, }, }; struct crypto_alg mv_aes_alg_cbc = { .cra_name = "cbc(aes)", .cra_driver_name = "mv-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = mv_cra_init, .cra_u = { .ablkcipher = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = mv_setkey_aes, .encrypt = mv_enc_aes_cbc, .decrypt = mv_dec_aes_cbc, }, }, }; struct ahash_alg mv_sha1_alg = { .init = mv_hash_init, .update = mv_hash_update, .final = mv_hash_final, .finup = mv_hash_finup, .digest = mv_hash_digest, .halg = { .digestsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "sha1", .cra_driver_name = "mv-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), .cra_init = mv_cra_hash_sha1_init, .cra_exit = mv_cra_hash_exit, .cra_module = THIS_MODULE, } } }; struct ahash_alg mv_hmac_sha1_alg = { .init = mv_hash_init, .update = mv_hash_update, .final = mv_hash_final, .finup = mv_hash_finup, .digest = mv_hash_digest, .setkey = mv_hash_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "mv-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), .cra_init = mv_cra_hash_hmac_sha1_init, .cra_exit = mv_cra_hash_exit, .cra_module = THIS_MODULE, } } }; static int mv_probe(struct platform_device *pdev) { struct crypto_priv *cp; struct resource *res; int irq; int ret; if (cpg) { printk(KERN_ERR MV_CESA "Second 
crypto dev?\n"); return -EEXIST; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!res) return -ENXIO; cp = kzalloc(sizeof(*cp), GFP_KERNEL); if (!cp) return -ENOMEM; spin_lock_init(&cp->lock); crypto_init_queue(&cp->queue, 50); cp->reg = ioremap(res->start, resource_size(res)); if (!cp->reg) { ret = -ENOMEM; goto err; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); if (!res) { ret = -ENXIO; goto err_unmap_reg; } cp->sram_size = resource_size(res); cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; cp->sram = ioremap(res->start, cp->sram_size); if (!cp->sram) { ret = -ENOMEM; goto err_unmap_reg; } irq = platform_get_irq(pdev, 0); if (irq < 0 || irq == NO_IRQ) { ret = irq; goto err_unmap_sram; } cp->irq = irq; platform_set_drvdata(pdev, cp); cpg = cp; cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); if (IS_ERR(cp->queue_th)) { ret = PTR_ERR(cp->queue_th); goto err_unmap_sram; } ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), cp); if (ret) goto err_thread; writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); ret = crypto_register_alg(&mv_aes_alg_ecb); if (ret) { printk(KERN_WARNING MV_CESA "Could not register aes-ecb driver\n"); goto err_irq; } ret = crypto_register_alg(&mv_aes_alg_cbc); if (ret) { printk(KERN_WARNING MV_CESA "Could not register aes-cbc driver\n"); goto err_unreg_ecb; } ret = crypto_register_ahash(&mv_sha1_alg); if (ret == 0) cpg->has_sha1 = 1; else printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); ret = crypto_register_ahash(&mv_hmac_sha1_alg); if (ret == 0) { cpg->has_hmac_sha1 = 1; } else { printk(KERN_WARNING MV_CESA "Could not register hmac-sha1 driver\n"); } return 0; err_unreg_ecb: crypto_unregister_alg(&mv_aes_alg_ecb); err_irq: free_irq(irq, cp); err_thread: kthread_stop(cp->queue_th); err_unmap_sram: iounmap(cp->sram); err_unmap_reg: 
iounmap(cp->reg); err: kfree(cp); cpg = NULL; platform_set_drvdata(pdev, NULL); return ret; } static int mv_remove(struct platform_device *pdev) { struct crypto_priv *cp = platform_get_drvdata(pdev); crypto_unregister_alg(&mv_aes_alg_ecb); crypto_unregister_alg(&mv_aes_alg_cbc); if (cp->has_sha1) crypto_unregister_ahash(&mv_sha1_alg); if (cp->has_hmac_sha1) crypto_unregister_ahash(&mv_hmac_sha1_alg); kthread_stop(cp->queue_th); free_irq(cp->irq, cp); memset(cp->sram, 0, cp->sram_size); iounmap(cp->sram); iounmap(cp->reg); kfree(cp); cpg = NULL; return 0; } static struct platform_driver marvell_crypto = { .probe = mv_probe, .remove = mv_remove, .driver = { .owner = THIS_MODULE, .name = "mv_crypto", }, }; MODULE_ALIAS("platform:mv_crypto"); static int __init mv_crypto_init(void) { return platform_driver_register(&marvell_crypto); } module_init(mv_crypto_init); static void __exit mv_crypto_exit(void) { platform_driver_unregister(&marvell_crypto); } module_exit(mv_crypto_exit); MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); MODULE_LICENSE("GPL");
gpl-2.0
rdm-dev/linux-curie
drivers/usb/core/file.c
1827
6194
/* * drivers/usb/core/file.c * * (C) Copyright Linus Torvalds 1999 * (C) Copyright Johannes Erdfelt 1999-2001 * (C) Copyright Andreas Gal 1999 * (C) Copyright Gregory P. Smith 1999 * (C) Copyright Deti Fliegl 1999 (new USB architecture) * (C) Copyright Randy Dunlap 2000 * (C) Copyright David Brownell 2000-2001 (kernel hotplug, usb_device_id, * more docs, etc) * (C) Copyright Yggdrasil Computing, Inc. 2000 * (usb_device_id matching changes by Adam J. Richter) * (C) Copyright Greg Kroah-Hartman 2002-2003 * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/usb.h> #include "usb.h" #define MAX_USB_MINORS 256 static const struct file_operations *usb_minors[MAX_USB_MINORS]; static DECLARE_RWSEM(minor_rwsem); static int usb_open(struct inode *inode, struct file *file) { int err = -ENODEV; const struct file_operations *new_fops; down_read(&minor_rwsem); new_fops = fops_get(usb_minors[iminor(inode)]); if (!new_fops) goto done; replace_fops(file, new_fops); /* Curiouser and curiouser... NULL ->open() as "no device" ? 
*/ if (file->f_op->open) err = file->f_op->open(inode, file); done: up_read(&minor_rwsem); return err; } static const struct file_operations usb_fops = { .owner = THIS_MODULE, .open = usb_open, .llseek = noop_llseek, }; static struct usb_class { struct kref kref; struct class *class; } *usb_class; static char *usb_devnode(struct device *dev, umode_t *mode) { struct usb_class_driver *drv; drv = dev_get_drvdata(dev); if (!drv || !drv->devnode) return NULL; return drv->devnode(dev, mode); } static int init_usb_class(void) { int result = 0; if (usb_class != NULL) { kref_get(&usb_class->kref); goto exit; } usb_class = kmalloc(sizeof(*usb_class), GFP_KERNEL); if (!usb_class) { result = -ENOMEM; goto exit; } kref_init(&usb_class->kref); usb_class->class = class_create(THIS_MODULE, "usbmisc"); if (IS_ERR(usb_class->class)) { result = PTR_ERR(usb_class->class); printk(KERN_ERR "class_create failed for usb devices\n"); kfree(usb_class); usb_class = NULL; goto exit; } usb_class->class->devnode = usb_devnode; exit: return result; } static void release_usb_class(struct kref *kref) { /* Ok, we cheat as we know we only have one usb_class */ class_destroy(usb_class->class); kfree(usb_class); usb_class = NULL; } static void destroy_usb_class(void) { if (usb_class) kref_put(&usb_class->kref, release_usb_class); } int usb_major_init(void) { int error; error = register_chrdev(USB_MAJOR, "usb", &usb_fops); if (error) printk(KERN_ERR "Unable to get major %d for usb devices\n", USB_MAJOR); return error; } void usb_major_cleanup(void) { unregister_chrdev(USB_MAJOR, "usb"); } /** * usb_register_dev - register a USB device, and ask for a minor number * @intf: pointer to the usb_interface that is being registered * @class_driver: pointer to the usb_class_driver for this device * * This should be called by all USB drivers that use the USB major number. * If CONFIG_USB_DYNAMIC_MINORS is enabled, the minor number will be * dynamically allocated out of the list of available ones. 
If it is not * enabled, the minor number will be based on the next available free minor, * starting at the class_driver->minor_base. * * This function also creates a usb class device in the sysfs tree. * * usb_deregister_dev() must be called when the driver is done with * the minor numbers given out by this function. * * Return: -EINVAL if something bad happens with trying to register a * device, and 0 on success. */ int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { int retval; int minor_base = class_driver->minor_base; int minor; char name[20]; char *temp; #ifdef CONFIG_USB_DYNAMIC_MINORS /* * We don't care what the device tries to start at, we want to start * at zero to pack the devices into the smallest available space with * no holes in the minor range. */ minor_base = 0; #endif if (class_driver->fops == NULL) return -EINVAL; if (intf->minor >= 0) return -EADDRINUSE; retval = init_usb_class(); if (retval) return retval; dev_dbg(&intf->dev, "looking for a minor, starting at %d\n", minor_base); down_write(&minor_rwsem); for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { if (usb_minors[minor]) continue; usb_minors[minor] = class_driver->fops; intf->minor = minor; break; } up_write(&minor_rwsem); if (intf->minor < 0) return -EXFULL; /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); temp = strrchr(name, '/'); if (temp && (temp[1] != '\0')) ++temp; else temp = name; intf->usb_dev = device_create(usb_class->class, &intf->dev, MKDEV(USB_MAJOR, minor), class_driver, "%s", temp); if (IS_ERR(intf->usb_dev)) { down_write(&minor_rwsem); usb_minors[minor] = NULL; intf->minor = -1; up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); /** * usb_deregister_dev - deregister a USB device's dynamic minor. 
* @intf: pointer to the usb_interface that is being deregistered * @class_driver: pointer to the usb_class_driver for this device * * Used in conjunction with usb_register_dev(). This function is called * when the USB driver is finished with the minor numbers gotten from a * call to usb_register_dev() (usually when the device is disconnected * from the system.) * * This function also removes the usb class device from the sysfs tree. * * This should be called by all drivers that use the USB major number. */ void usb_deregister_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { if (intf->minor == -1) return; dev_dbg(&intf->dev, "removing %d minor\n", intf->minor); down_write(&minor_rwsem); usb_minors[intf->minor] = NULL; up_write(&minor_rwsem); device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); intf->usb_dev = NULL; intf->minor = -1; destroy_usb_class(); } EXPORT_SYMBOL_GPL(usb_deregister_dev);
gpl-2.0
RafaelRMachado/linux
drivers/char/tlclk.c
2083
23936
/* * Telecom Clock driver for Intel NetStructure(tm) MPCBL0010 * * Copyright (C) 2005 Kontron Canada * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <sebastien.bouchard@ca.kontron.com> and the current * Maintainer <mark.gross@intel.com> * * Description : This is the TELECOM CLOCK module driver for the ATCA * MPCBL0010 ATCA computer. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> /* printk() */ #include <linux/fs.h> /* everything... 
*/ #include <linux/errno.h> /* error codes */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/timer.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/platform_device.h> #include <asm/io.h> /* inb/outb */ #include <asm/uaccess.h> MODULE_AUTHOR("Sebastien Bouchard <sebastien.bouchard@ca.kontron.com>"); MODULE_LICENSE("GPL"); /*Hardware Reset of the PLL */ #define RESET_ON 0x00 #define RESET_OFF 0x01 /* MODE SELECT */ #define NORMAL_MODE 0x00 #define HOLDOVER_MODE 0x10 #define FREERUN_MODE 0x20 /* FILTER SELECT */ #define FILTER_6HZ 0x04 #define FILTER_12HZ 0x00 /* SELECT REFERENCE FREQUENCY */ #define REF_CLK1_8kHz 0x00 #define REF_CLK2_19_44MHz 0x02 /* Select primary or secondary redundant clock */ #define PRIMARY_CLOCK 0x00 #define SECONDARY_CLOCK 0x01 /* CLOCK TRANSMISSION DEFINE */ #define CLK_8kHz 0xff #define CLK_16_384MHz 0xfb #define CLK_1_544MHz 0x00 #define CLK_2_048MHz 0x01 #define CLK_4_096MHz 0x02 #define CLK_6_312MHz 0x03 #define CLK_8_192MHz 0x04 #define CLK_19_440MHz 0x06 #define CLK_8_592MHz 0x08 #define CLK_11_184MHz 0x09 #define CLK_34_368MHz 0x0b #define CLK_44_736MHz 0x0a /* RECEIVED REFERENCE */ #define AMC_B1 0 #define AMC_B2 1 /* HARDWARE SWITCHING DEFINE */ #define HW_ENABLE 0x80 #define HW_DISABLE 0x00 /* HARDWARE SWITCHING MODE DEFINE */ #define PLL_HOLDOVER 0x40 #define LOST_CLOCK 0x00 /* ALARMS DEFINE */ #define UNLOCK_MASK 0x10 #define HOLDOVER_MASK 0x20 #define SEC_LOST_MASK 0x40 #define PRI_LOST_MASK 0x80 /* INTERRUPT CAUSE DEFINE */ #define PRI_LOS_01_MASK 0x01 #define PRI_LOS_10_MASK 0x02 #define SEC_LOS_01_MASK 0x04 #define SEC_LOS_10_MASK 0x08 #define HOLDOVER_01_MASK 0x10 #define HOLDOVER_10_MASK 0x20 #define UNLOCK_01_MASK 0x40 #define UNLOCK_10_MASK 0x80 struct tlclk_alarms { __u32 lost_clocks; __u32 lost_primary_clock; __u32 lost_secondary_clock; __u32 
primary_clock_back; __u32 secondary_clock_back; __u32 switchover_primary; __u32 switchover_secondary; __u32 pll_holdover; __u32 pll_end_holdover; __u32 pll_lost_sync; __u32 pll_sync; }; /* Telecom clock I/O register definition */ #define TLCLK_BASE 0xa08 #define TLCLK_REG0 TLCLK_BASE #define TLCLK_REG1 (TLCLK_BASE+1) #define TLCLK_REG2 (TLCLK_BASE+2) #define TLCLK_REG3 (TLCLK_BASE+3) #define TLCLK_REG4 (TLCLK_BASE+4) #define TLCLK_REG5 (TLCLK_BASE+5) #define TLCLK_REG6 (TLCLK_BASE+6) #define TLCLK_REG7 (TLCLK_BASE+7) #define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port) /* 0 = Dynamic allocation of the major device number */ #define TLCLK_MAJOR 0 /* sysfs interface definition: Upon loading the driver will create a sysfs directory under /sys/devices/platform/telco_clock. This directory exports the following interfaces. There operation is documented in the MCPBL0010 TPS under the Telecom Clock API section, 11.4. alarms : current_ref : received_ref_clk3a : received_ref_clk3b : enable_clk3a_output : enable_clk3b_output : enable_clka0_output : enable_clka1_output : enable_clkb0_output : enable_clkb1_output : filter_select : hardware_switching : hardware_switching_mode : telclock_version : mode_select : refalign : reset : select_amcb1_transmit_clock : select_amcb2_transmit_clock : select_redundant_clock : select_ref_frequency : All sysfs interfaces are integers in hex format, i.e echo 99 > refalign has the same effect as echo 0x99 > refalign. 
*/ static unsigned int telclk_interrupt; static int int_events; /* Event that generate a interrupt */ static int got_event; /* if events processing have been done */ static void switchover_timeout(unsigned long data); static struct timer_list switchover_timer = TIMER_INITIALIZER(switchover_timeout , 0, 0); static unsigned long tlclk_timer_data; static struct tlclk_alarms *alarm_events; static DEFINE_SPINLOCK(event_lock); static int tlclk_major = TLCLK_MAJOR; static irqreturn_t tlclk_interrupt(int irq, void *dev_id); static DECLARE_WAIT_QUEUE_HEAD(wq); static unsigned long useflags; static DEFINE_MUTEX(tlclk_mutex); static int tlclk_open(struct inode *inode, struct file *filp) { int result; mutex_lock(&tlclk_mutex); if (test_and_set_bit(0, &useflags)) { result = -EBUSY; /* this legacy device is always one per system and it doesn't * know how to handle multiple concurrent clients. */ goto out; } /* Make sure there is no interrupt pending while * initialising interrupt handler */ inb(TLCLK_REG6); /* This device is wired through the FPGA IO space of the ATCA blade * we can't share this IRQ */ result = request_irq(telclk_interrupt, &tlclk_interrupt, 0, "telco_clock", tlclk_interrupt); if (result == -EBUSY) printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n"); else inb(TLCLK_REG6); /* Clear interrupt events */ out: mutex_unlock(&tlclk_mutex); return result; } static int tlclk_release(struct inode *inode, struct file *filp) { free_irq(telclk_interrupt, tlclk_interrupt); clear_bit(0, &useflags); return 0; } static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { if (count < sizeof(struct tlclk_alarms)) return -EIO; if (mutex_lock_interruptible(&tlclk_mutex)) return -EINTR; wait_event_interruptible(wq, got_event); if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) { mutex_unlock(&tlclk_mutex); return -EFAULT; } memset(alarm_events, 0, sizeof(struct tlclk_alarms)); got_event = 0; mutex_unlock(&tlclk_mutex); return 
sizeof(struct tlclk_alarms); } static const struct file_operations tlclk_fops = { .read = tlclk_read, .open = tlclk_open, .release = tlclk_release, .llseek = noop_llseek, }; static struct miscdevice tlclk_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "telco_clock", .fops = &tlclk_fops, }; static ssize_t show_current_ref(struct device *d, struct device_attribute *attr, char *buf) { unsigned long ret_val; unsigned long flags; spin_lock_irqsave(&event_lock, flags); ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3); spin_unlock_irqrestore(&event_lock, flags); return sprintf(buf, "0x%lX\n", ret_val); } static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL); static ssize_t show_telclock_version(struct device *d, struct device_attribute *attr, char *buf) { unsigned long ret_val; unsigned long flags; spin_lock_irqsave(&event_lock, flags); ret_val = inb(TLCLK_REG5); spin_unlock_irqrestore(&event_lock, flags); return sprintf(buf, "0x%lX\n", ret_val); } static DEVICE_ATTR(telclock_version, S_IRUGO, show_telclock_version, NULL); static ssize_t show_alarms(struct device *d, struct device_attribute *attr, char *buf) { unsigned long ret_val; unsigned long flags; spin_lock_irqsave(&event_lock, flags); ret_val = (inb(TLCLK_REG2) & 0xf0); spin_unlock_irqrestore(&event_lock, flags); return sprintf(buf, "0x%lX\n", ret_val); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); static ssize_t store_received_ref_clk3a(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, ": tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG1, 0xef, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL, store_received_ref_clk3a); static ssize_t store_received_ref_clk3b(struct device *d, struct device_attribute *attr, const char 
*buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, ": tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL, store_received_ref_clk3b); static ssize_t store_enable_clk3b_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, ": tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clk3b_output); static ssize_t store_enable_clk3a_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clk3a_output); static ssize_t store_enable_clkb1_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clkb1_output); static ssize_t 
store_enable_clka1_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clka1_output); static ssize_t store_enable_clkb0_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clkb0_output); static ssize_t store_enable_clka0_output(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG2, 0xfe, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL, store_enable_clka0_output); static ssize_t store_select_amcb2_transmit_clock(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long flags; unsigned long tmp; unsigned char val; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28); SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); } else if (val >= 
CLK_8_592MHz) { SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38); switch (val) { case CLK_8_592MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); break; case CLK_11_184MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); break; case CLK_34_368MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); break; case CLK_44_736MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); break; } } else SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL, store_select_amcb2_transmit_clock); static ssize_t store_select_amcb1_transmit_clock(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) { SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5); SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val); } else if (val >= CLK_8_592MHz) { SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7); switch (val) { case CLK_8_592MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 2); break; case CLK_11_184MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 0); break; case CLK_34_368MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 3); break; case CLK_44_736MHz: SET_PORT_BITS(TLCLK_REG0, 0xfc, 1); break; } } else SET_PORT_BITS(TLCLK_REG3, 0xf8, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL, store_select_amcb1_transmit_clock); static ssize_t store_select_redundant_clock(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG1, 0xfe, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static 
DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL, store_select_redundant_clock); static ssize_t store_select_ref_frequency(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG1, 0xfd, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL, store_select_ref_frequency); static ssize_t store_filter_select(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG0, 0xfb, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select); static ssize_t store_hardware_switching_mode(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG0, 0xbf, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL, store_hardware_switching_mode); static ssize_t store_hardware_switching(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG0, 0x7f, val); 
spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL, store_hardware_switching); static ssize_t store_refalign (struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08); SET_PORT_BITS(TLCLK_REG0, 0xf7, 0); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign); static ssize_t store_mode_select (struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG0, 0xcf, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select); static ssize_t store_reset (struct device *d, struct device_attribute *attr, const char *buf, size_t count) { unsigned long tmp; unsigned char val; unsigned long flags; sscanf(buf, "%lX", &tmp); dev_dbg(d, "tmp = 0x%lX\n", tmp); val = (unsigned char)tmp; spin_lock_irqsave(&event_lock, flags); SET_PORT_BITS(TLCLK_REG4, 0xfd, val); spin_unlock_irqrestore(&event_lock, flags); return strnlen(buf, count); } static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset); static struct attribute *tlclk_sysfs_entries[] = { &dev_attr_current_ref.attr, &dev_attr_telclock_version.attr, &dev_attr_alarms.attr, &dev_attr_received_ref_clk3a.attr, &dev_attr_received_ref_clk3b.attr, &dev_attr_enable_clk3a_output.attr, &dev_attr_enable_clk3b_output.attr, &dev_attr_enable_clkb1_output.attr, &dev_attr_enable_clka1_output.attr, 
&dev_attr_enable_clkb0_output.attr, &dev_attr_enable_clka0_output.attr, &dev_attr_select_amcb1_transmit_clock.attr, &dev_attr_select_amcb2_transmit_clock.attr, &dev_attr_select_redundant_clock.attr, &dev_attr_select_ref_frequency.attr, &dev_attr_filter_select.attr, &dev_attr_hardware_switching_mode.attr, &dev_attr_hardware_switching.attr, &dev_attr_refalign.attr, &dev_attr_mode_select.attr, &dev_attr_reset.attr, NULL }; static struct attribute_group tlclk_attribute_group = { .name = NULL, /* put in device directory */ .attrs = tlclk_sysfs_entries, }; static struct platform_device *tlclk_device; static int __init tlclk_init(void) { int ret; ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); if (ret < 0) { printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); return ret; } tlclk_major = ret; alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); if (!alarm_events) { ret = -ENOMEM; goto out1; } /* Read telecom clock IRQ number (Set by BIOS) */ if (!request_region(TLCLK_BASE, 8, "telco_clock")) { printk(KERN_ERR "tlclk: request_region 0x%X failed.\n", TLCLK_BASE); ret = -EBUSY; goto out2; } telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? 
*/ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", telclk_interrupt); ret = -ENXIO; goto out3; } init_timer(&switchover_timer); ret = misc_register(&tlclk_miscdev); if (ret < 0) { printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret); goto out3; } tlclk_device = platform_device_register_simple("telco_clock", -1, NULL, 0); if (IS_ERR(tlclk_device)) { printk(KERN_ERR "tlclk: platform_device_register failed.\n"); ret = PTR_ERR(tlclk_device); goto out4; } ret = sysfs_create_group(&tlclk_device->dev.kobj, &tlclk_attribute_group); if (ret) { printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n"); goto out5; } return 0; out5: platform_device_unregister(tlclk_device); out4: misc_deregister(&tlclk_miscdev); out3: release_region(TLCLK_BASE, 8); out2: kfree(alarm_events); out1: unregister_chrdev(tlclk_major, "telco_clock"); return ret; } static void __exit tlclk_cleanup(void) { sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group); platform_device_unregister(tlclk_device); misc_deregister(&tlclk_miscdev); unregister_chrdev(tlclk_major, "telco_clock"); release_region(TLCLK_BASE, 8); del_timer_sync(&switchover_timer); kfree(alarm_events); } static void switchover_timeout(unsigned long data) { unsigned long flags = *(unsigned long *) data; if ((flags & 1)) { if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) alarm_events->switchover_primary++; } else { if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) alarm_events->switchover_secondary++; } /* Alarm processing is done, wake up read task */ del_timer(&switchover_timer); got_event = 1; wake_up(&wq); } static irqreturn_t tlclk_interrupt(int irq, void *dev_id) { unsigned long flags; spin_lock_irqsave(&event_lock, flags); /* Read and clear interrupt events */ int_events = inb(TLCLK_REG6); /* Primary_Los changed from 0 to 1 ? 
*/ if (int_events & PRI_LOS_01_MASK) { if (inb(TLCLK_REG2) & SEC_LOST_MASK) alarm_events->lost_clocks++; else alarm_events->lost_primary_clock++; } /* Primary_Los changed from 1 to 0 ? */ if (int_events & PRI_LOS_10_MASK) { alarm_events->primary_clock_back++; SET_PORT_BITS(TLCLK_REG1, 0xFE, 1); } /* Secondary_Los changed from 0 to 1 ? */ if (int_events & SEC_LOS_01_MASK) { if (inb(TLCLK_REG2) & PRI_LOST_MASK) alarm_events->lost_clocks++; else alarm_events->lost_secondary_clock++; } /* Secondary_Los changed from 1 to 0 ? */ if (int_events & SEC_LOS_10_MASK) { alarm_events->secondary_clock_back++; SET_PORT_BITS(TLCLK_REG1, 0xFE, 0); } if (int_events & HOLDOVER_10_MASK) alarm_events->pll_end_holdover++; if (int_events & UNLOCK_01_MASK) alarm_events->pll_lost_sync++; if (int_events & UNLOCK_10_MASK) alarm_events->pll_sync++; /* Holdover changed from 0 to 1 ? */ if (int_events & HOLDOVER_01_MASK) { alarm_events->pll_holdover++; /* TIMEOUT in ~10ms */ switchover_timer.expires = jiffies + msecs_to_jiffies(10); tlclk_timer_data = inb(TLCLK_REG1); switchover_timer.data = (unsigned long) &tlclk_timer_data; mod_timer(&switchover_timer, switchover_timer.expires); } else { got_event = 1; wake_up(&wq); } spin_unlock_irqrestore(&event_lock, flags); return IRQ_HANDLED; } module_init(tlclk_init); module_exit(tlclk_cleanup);
gpl-2.0
ferhung/kernel_mtk
drivers/net/ethernet/sun/niu.c
2083
234892
/* niu.c: Neptune ethernet driver. * * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/mii.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/log2.h> #include <linux/jiffies.h> #include <linux/crc32.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of_device.h> #include "niu.h" #define DRV_MODULE_NAME "niu" #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "Apr 22, 2010" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("NIU ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #ifndef readq static u64 readq(void __iomem *reg) { return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); } static void writeq(u64 val, void __iomem *reg) { writel(val & 0xffffffff, reg); writel(val >> 32, reg + 0x4UL); } #endif static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, {} }; MODULE_DEVICE_TABLE(pci, niu_pci_tbl); #define NIU_TX_TIMEOUT (5 * HZ) #define nr64(reg) readq(np->regs + (reg)) #define nw64(reg, val) writeq((val), np->regs + (reg)) #define nr64_mac(reg) readq(np->mac_regs + (reg)) #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) #define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) static int niu_debug; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "NIU debug level"); #define niu_lock_parent(np, flags) \ spin_lock_irqsave(&np->parent->lock, flags) #define niu_unlock_parent(np, flags) \ spin_unlock_irqrestore(&np->parent->lock, flags) static int serdes_init_10g_serdes(struct niu *np); static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_mac(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64_mac(reg, bits); err = __niu_wait_bits_clear_mac(np, 
reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_mac(reg)); return err; } #define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64_ipp(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; u64 val; val = nr64_ipp(reg); val |= bits; nw64_ipp(reg, val); err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, (unsigned long long)nr64_ipp(reg)); return err; } #define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay) { while (--limit >= 0) { u64 val = nr64(reg); if (!(val & bits)) break; udelay(delay); } if (limit < 0) return -ENODEV; return 0; } #define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ }) static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, u64 bits, int limit, int delay, const char *reg_name) { int err; nw64(reg, bits); err = __niu_wait_bits_clear(np, reg, bits, limit, delay); if (err) netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", (unsigned long long)bits, reg_name, 
(unsigned long long)nr64(reg)); return err; } #define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ ({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ }) static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) { u64 val = (u64) lp->timer; if (on) val |= LDG_IMGMT_ARM; nw64(LDG_IMGMT(lp->ldg_num), val); } static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) { unsigned long mask_reg, bits; u64 val; if (ldn < 0 || ldn > LDN_MAX) return -EINVAL; if (ldn < 64) { mask_reg = LD_IM0(ldn); bits = LD_IM0_MASK; } else { mask_reg = LD_IM1(ldn - 64); bits = LD_IM1_MASK; } val = nr64(mask_reg); if (on) val &= ~bits; else val |= bits; nw64(mask_reg, val); return 0; } static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) { struct niu_parent *parent = np->parent; int i; for (i = 0; i <= LDN_MAX; i++) { int err; if (parent->ldg_map[i] != lp->ldg_num) continue; err = niu_ldn_irq_enable(np, i, on); if (err) return err; } return 0; } static int niu_enable_interrupts(struct niu *np, int on) { int i; for (i = 0; i < np->num_ldg; i++) { struct niu_ldg *lp = &np->ldg[i]; int err; err = niu_enable_ldn_in_ldg(np, lp, on); if (err) return err; } for (i = 0; i < np->num_ldg; i++) niu_ldg_rearm(np, &np->ldg[i], on); return 0; } static u32 phy_encode(u32 type, int port) { return type << (port * 2); } static u32 phy_decode(u32 val, int port) { return (val >> (port * 2)) & PORT_TYPE_MASK; } static int mdio_wait(struct niu *np) { int limit = 1000; u64 val; while (--limit > 0) { val = nr64(MIF_FRAME_OUTPUT); if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) return val & MIF_FRAME_OUTPUT_DATA; udelay(10); } return -ENODEV; } static int mdio_read(struct niu *np, int port, int dev, int reg) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); return mdio_wait(np); } static int 
mdio_write(struct niu *np, int port, int dev, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); err = mdio_wait(np); if (err < 0) return err; nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int mii_read(struct niu *np, int port, int reg) { nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); return mdio_wait(np); } static int mii_write(struct niu *np, int port, int reg, int data) { int err; nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); err = mdio_wait(np); if (err < 0) return err; return 0; } static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_L(channel), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TX_CFG_H(channel), val >> 16); return err; } static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_L(channel), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_RX_CFG_H(channel), val >> 16); return err; } /* Mode is always 10G fiber. */ static int serdes_init_niu_10g_fiber(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg; unsigned long i; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { int err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { int err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } return 0; } static int serdes_init_niu_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u16 pll_cfg, pll_sts; int max_retry = 100; u64 uninitialized_var(sig), mask, val; u32 tx_cfg, rx_cfg; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV | PLL_TX_CFG_RATE_HALF); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_RATE_HALF); if (np->port == 0) rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE; if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 1G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int serdes_init_niu_10g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; int max_retry = 100; u64 uninitialized_var(sig), mask, val; unsigned long i; int err; tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | PLL_RX_CFG_EQ_LP_ADAPTIVE); if (lp->loopback_mode == LOOPBACK_PHY) { u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_TEST_CFG_L, test_cfg); tx_cfg |= PLL_TX_CFG_ENTEST; rx_cfg |= PLL_RX_CFG_ENTEST; } /* Initialize PLL for 10G */ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X); err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", np->port, __func__); return err; } pll_sts = PLL_CFG_ENPLL; err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, ESR2_TI_PLL_STS_L, pll_sts & 0xffff); if (err) { netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", np->port, __func__); return err; } udelay(200); /* Initialize all 4 lanes of the SERDES. 
*/ for (i = 0; i < 4; i++) { err = esr2_set_tx_cfg(np, i, tx_cfg); if (err) return err; } for (i = 0; i < 4; i++) { err = esr2_set_rx_cfg(np, i, rx_cfg); if (err) return err; } /* check if serdes is ready */ switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } while (max_retry--) { sig = nr64(ESR_INT_SIGNALS); if ((sig & mask) == val) break; mdelay(500); } if ((sig & mask) != val) { pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n", np->port, (int)(sig & mask), (int)val); /* 10G failed, try initializing at 1G */ err = serdes_init_niu_1g_serdes(np); if (!err) { np->flags &= ~NIU_FLAGS_10G; np->mac_xcvr = MAC_XCVR_PCS; } else { netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", np->port); return -ENODEV; } } return 0; } static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan)); if (err >= 0) *val |= ((err & 0xffff) << 16); err = 0; } return err; } static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan)); if (err >= 0) { *val = (err & 0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan)); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_read_reset(struct niu *np, u32 *val) { int err; err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L); if (err >= 0) { *val = (err & 
0xffff); err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H); if (err >= 0) { *val |= ((err & 0xffff) << 16); err = 0; } } return err; } static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_H(chan), (val >> 16)); return err; } static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) { int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_L(chan), val & 0xffff); if (!err) err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_GLUE_CTRL0_H(chan), (val >> 16)); return err; } static int esr_reset(struct niu *np) { u32 uninitialized_var(reset); int err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0x0000); if (err) return err; err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_L, 0xffff); if (err) return err; udelay(200); err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_RESET_CTRL_H, 0x0000); if (err) return err; udelay(200); err = esr_read_reset(np, &reset); if (err) return err; if (reset != 0) { netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", np->port, reset); return -ENODEV; } return 0; } static int serdes_init_10g(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; int err; switch (np->port) { case 0: ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; break; case 1: ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | 
(0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } err = esr_reset(np); if (err) return err; sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: mask = ESR_INT_SIGNALS_P0_BITS; val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0 | ESR_INT_XSRDY_P0 | ESR_INT_XDP_P0_CH3 | ESR_INT_XDP_P0_CH2 | ESR_INT_XDP_P0_CH1 | ESR_INT_XDP_P0_CH0); break; case 1: mask = ESR_INT_SIGNALS_P1_BITS; val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1 | ESR_INT_XSRDY_P1 | ESR_INT_XDP_P1_CH3 | ESR_INT_XDP_P1_CH2 | ESR_INT_XDP_P1_CH1 | ESR_INT_XDP_P1_CH0); break; default: return -EINVAL; } if ((sig & mask) != val) { if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 
0; } netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } if (np->flags & NIU_FLAGS_HOTPLUG_PHY) np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; return 0; } static int serdes_init_1g(struct niu *np) { u64 val; val = nr64(ENET_SERDES_1_PLL_CFG); val &= ~ENET_SERDES_PLL_FBDIV2; switch (np->port) { case 0: val |= ENET_SERDES_PLL_HRATE0; break; case 1: val |= ENET_SERDES_PLL_HRATE1; break; case 2: val |= ENET_SERDES_PLL_HRATE2; break; case 3: val |= ENET_SERDES_PLL_HRATE3; break; default: return -EINVAL; } nw64(ENET_SERDES_1_PLL_CFG, val); return 0; } static int serdes_init_1g_serdes(struct niu *np) { struct niu_link_config *lp = &np->link_config; unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i; u64 ctrl_val, test_cfg_val, sig, mask, val; int err; u64 reset_val, val_rd; val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 | ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 | ENET_SERDES_PLL_FBDIV0; switch (np->port) { case 0: reset_val = ENET_SERDES_RESET_0; ctrl_reg = ENET_SERDES_0_CTRL_CFG; test_cfg_reg = ENET_SERDES_0_TEST_CFG; pll_cfg = ENET_SERDES_0_PLL_CFG; break; case 1: reset_val = ENET_SERDES_RESET_1; ctrl_reg = ENET_SERDES_1_CTRL_CFG; test_cfg_reg = ENET_SERDES_1_TEST_CFG; pll_cfg = ENET_SERDES_1_PLL_CFG; break; default: return -EINVAL; } ctrl_val = (ENET_SERDES_CTRL_SDET_0 | ENET_SERDES_CTRL_SDET_1 | ENET_SERDES_CTRL_SDET_2 | ENET_SERDES_CTRL_SDET_3 | (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); test_cfg_val = 0; if (lp->loopback_mode == LOOPBACK_PHY) { test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_0_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_1_SHIFT) | 
(ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_2_SHIFT) | (ENET_TEST_MD_PAD_LOOPBACK << ENET_SERDES_TEST_MD_3_SHIFT)); } nw64(ENET_SERDES_RESET, reset_val); mdelay(20); val_rd = nr64(ENET_SERDES_RESET); val_rd &= ~reset_val; nw64(pll_cfg, val); nw64(ctrl_reg, ctrl_val); nw64(test_cfg_reg, test_cfg_val); nw64(ENET_SERDES_RESET, val_rd); mdelay(2000); /* Initialize all 4 lanes of the SERDES. */ for (i = 0; i < 4; i++) { u32 rxtx_ctrl, glue0; err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); if (err) return err; err = esr_read_glue0(np, i, &glue0); if (err) return err; rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); glue0 &= ~(ESR_GLUE_CTRL0_SRATE | ESR_GLUE_CTRL0_THCNT | ESR_GLUE_CTRL0_BLTIME); glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT)); err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); if (err) return err; err = esr_write_glue0(np, i, glue0); if (err) return err; } sig = nr64(ESR_INT_SIGNALS); switch (np->port) { case 0: val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0); mask = val; break; case 1: val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1); mask = val; break; default: return -EINVAL; } if ((sig & mask) != val) { netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", np->port, (int)(sig & mask), (int)val); return -ENODEV; } return 0; } static int link_status_1g_serdes(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; int link_up; u64 val; u16 current_speed; unsigned long flags; u8 current_duplex; link_up = 0; current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); val = nr64_pcs(PCS_MII_STAT); if (val & PCS_MII_STAT_LINK_STATUS) { link_up = 1; current_speed = SPEED_1000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex = current_duplex; spin_unlock_irqrestore(&np->lock, 
flags); *link_up_p = link_up; return 0; } static int link_status_10g_serdes(struct niu *np, int *link_up_p) { unsigned long flags; struct niu_link_config *lp = &np->link_config; int link_up = 0; int link_ok = 1; u64 val, val2; u16 current_speed; u8 current_duplex; if (!(np->flags & NIU_FLAGS_10G)) return link_status_1g_serdes(np, link_up_p); current_speed = SPEED_INVALID; current_duplex = DUPLEX_INVALID; spin_lock_irqsave(&np->lock, flags); val = nr64_xpcs(XPCS_STATUS(0)); val2 = nr64_mac(XMAC_INTER2); if (val2 & 0x01000000) link_ok = 0; if ((val & 0x1000ULL) && link_ok) { link_up = 1; current_speed = SPEED_10000; current_duplex = DUPLEX_FULL; } lp->active_speed = current_speed; lp->active_duplex = current_duplex; spin_unlock_irqrestore(&np->lock, flags); *link_up_p = link_up; return 0; } static int link_status_mii(struct niu *np, int *link_up_p) { struct niu_link_config *lp = &np->link_config; int err; int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus; int supported, advertising, active_speed, active_duplex; err = mii_read(np, np->phy_addr, MII_BMCR); if (unlikely(err < 0)) return err; bmcr = err; err = mii_read(np, np->phy_addr, MII_BMSR); if (unlikely(err < 0)) return err; bmsr = err; err = mii_read(np, np->phy_addr, MII_ADVERTISE); if (unlikely(err < 0)) return err; advert = err; err = mii_read(np, np->phy_addr, MII_LPA); if (unlikely(err < 0)) return err; lpa = err; if (likely(bmsr & BMSR_ESTATEN)) { err = mii_read(np, np->phy_addr, MII_ESTATUS); if (unlikely(err < 0)) return err; estatus = err; err = mii_read(np, np->phy_addr, MII_CTRL1000); if (unlikely(err < 0)) return err; ctrl1000 = err; err = mii_read(np, np->phy_addr, MII_STAT1000); if (unlikely(err < 0)) return err; stat1000 = err; } else estatus = ctrl1000 = stat1000 = 0; supported = 0; if (bmsr & BMSR_ANEGCAPABLE) supported |= SUPPORTED_Autoneg; if (bmsr & BMSR_10HALF) supported |= SUPPORTED_10baseT_Half; if (bmsr & BMSR_10FULL) supported |= SUPPORTED_10baseT_Full; if (bmsr & BMSR_100HALF) 
		supported |= SUPPORTED_100baseT_Half;
	if (bmsr & BMSR_100FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (estatus & ESTATUS_1000_THALF)
		supported |= SUPPORTED_1000baseT_Half;
	if (estatus & ESTATUS_1000_TFULL)
		supported |= SUPPORTED_1000baseT_Full;
	lp->supported = supported;

	advertising = mii_adv_to_ethtool_adv_t(advert);
	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);

	if (bmcr & BMCR_ANENABLE) {
		int neg, neg1000;

		lp->active_autoneg = 1;
		advertising |= ADVERTISED_Autoneg;

		neg = advert & lpa;
		/* Shift MII_CTRL1000 advertisement bits into MII_STAT1000
		 * partner-ability bit positions before intersecting.
		 */
		neg1000 = (ctrl1000 << 2) & stat1000;

		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
			active_speed = SPEED_1000;
		else if (neg & LPA_100)
			active_speed = SPEED_100;
		else if (neg & (LPA_10HALF | LPA_10FULL))
			active_speed = SPEED_10;
		else
			active_speed = SPEED_INVALID;

		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
			active_duplex = DUPLEX_FULL;
		else if (active_speed != SPEED_INVALID)
			active_duplex = DUPLEX_HALF;
		else
			active_duplex = DUPLEX_INVALID;
	} else {
		/* Autoneg disabled: decode forced speed/duplex from BMCR. */
		lp->active_autoneg = 0;

		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
			active_speed = SPEED_1000;
		else if (bmcr & BMCR_SPEED100)
			active_speed = SPEED_100;
		else
			active_speed = SPEED_10;

		if (bmcr & BMCR_FULLDPLX)
			active_duplex = DUPLEX_FULL;
		else
			active_duplex = DUPLEX_HALF;
	}

	lp->active_advertising = advertising;
	lp->active_speed = active_speed;
	lp->active_duplex = active_duplex;
	*link_up_p = !!(bmsr & BMSR_LSTATUS);

	return 0;
}

/* 1G RGMII link poll: under np->lock, read BMSR and, when link is
 * up, latch 1000/full into the link config.
 */
static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	u16 current_speed, bmsr;
	unsigned long flags;
	u8 current_duplex;
	int err, link_up;

	link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	spin_lock_irqsave(&np->lock, flags);

	err = -EINVAL;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		goto out;

	bmsr = err;
	if (bmsr & BMSR_LSTATUS) {
		u16 adv, lpa;

		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
		if (err < 0)
			goto out;
		adv = err;

		err = mii_read(np, np->phy_addr, MII_LPA);
		if (err < 0)
			goto out;
		lpa = err;

		/* NOTE(review): adv/lpa and the MII_ESTATUS value read
		 * below are never consumed; the result is hard-wired to
		 * 1000/full once BMSR_LSTATUS is seen.
		 */
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			goto out;
		link_up = 1;
		current_speed = SPEED_1000;
		current_duplex = DUPLEX_FULL;
	}
	lp->active_speed = current_speed;
	lp->active_duplex = current_duplex;
	err = 0;

out:
	spin_unlock_irqrestore(&np->lock, flags);

	*link_up_p = link_up;
	return err;
}

/* 1G copper link poll: wraps link_status_mii() with np->lock held
 * and marks the medium as twisted-pair.
 */
static int link_status_1g(struct niu *np, int *link_up_p)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	err = link_status_mii(np, link_up_p);
	lp->supported |= SUPPORTED_TP;
	lp->active_advertising |= ADVERTISED_TP;
	spin_unlock_irqrestore(&np->lock, flags);
	return err;
}

/* Soft-reset the BCM8704 via the PHYXS BMCR and poll (up to 1000
 * reads) for the reset bit to clear.  0xffff from the first read
 * (no PHY responding) is passed back to the caller.
 */
static int bcm8704_reset(struct niu *np)
{
	int err, limit;

	err = mdio_read(np, np->phy_addr,
			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
	if (err < 0 || err == 0xffff)
		return err;
	err |= BMCR_RESET;
	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		err = mdio_read(np, np->phy_addr,
				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
			   np->port, (err & 0xffff));
		return -ENODEV;
	}
	return 0;
}

/* When written, certain PHY registers need to be read back twice
 * in order for the bits to settle properly.
 */
static int bcm8704_user_dev3_readback(struct niu *np, int reg)
{
	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
	if (err < 0)
		return err;
	return 0;
}

/* BCM8706 USER_DEV3 bring-up: force the optical digital control GPIO
 * field to 0x3 and set the RESV2 bit, then let the PHY settle.
 */
static int bcm8706_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err |= USER_ODIG_CTRL_RESV2;
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

/* Full BCM8704 USER_DEV3 bring-up: control levels, PMD TX clocking,
 * double read-back of both registers (see comment above), then the
 * optical digital control GPIO field.
 */
static int bcm8704_init_user_dev3(struct niu *np)
{
	int err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
			 (USER_CONTROL_OPTXRST_LVL |
			  USER_CONTROL_OPBIASFLT_LVL |
			  USER_CONTROL_OBTMPFLT_LVL |
			  USER_CONTROL_OPPRFLT_LVL |
			  USER_CONTROL_OPTXFLT_LVL |
			  USER_CONTROL_OPRXLOS_LVL |
			  USER_CONTROL_OPRXFLT_LVL |
			  USER_CONTROL_OPTXON_LVL |
			  (0x3f << USER_CONTROL_RES1_SHIFT)));
	if (err)
		return err;

	err = mdio_write(np, np->phy_addr,
			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
			 (USER_PMD_TX_CTL_XFP_CLKEN |
			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
			  USER_PMD_TX_CTL_TSCK_LPWREN));
	if (err)
		return err;

	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
	if (err)
		return err;
	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_OPT_DIGITAL_CTRL);
	if (err < 0)
		return err;
	err &= ~USER_ODIG_CTRL_GPIOS;
	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
	if (err)
		return err;

	mdelay(1000);

	return 0;
}

/* Set the Marvell 88X2011 activity LED control field to @val. */
static int mrvl88x2011_act_led(struct niu *np, int val)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_8_TO_11_CTL);
	if (err < 0)
		return err;

	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
	err |= MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);

	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			  MRVL88X2011_LED_8_TO_11_CTL, err);
}

/* Set the 88X2011 LED blink rate field (bits 7:4 of BLINK_CTL). */
static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
{
	int err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
			MRVL88X2011_LED_BLINK_CTL);
	if (err >= 0) {
		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
		err |= (rate << 4);

		err = mdio_write(np, np->phy_addr,
				 MRVL88X2011_USER_DEV2_ADDR,
				 MRVL88X2011_LED_BLINK_CTL, err);
	}

	return err;
}

/* Bring up the Marvell 88X2011 10G PHY: LED setup, XFP reference
 * clock enable, optional MAC loopback, then enable the PMD
 * transmitter.
 */
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}

/* Dump a few BCM870x diagnostic registers and decode the analog
 * status / TX alarm pair into cable / optical-module hints.
 */
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	/* Read twice: these user registers need a double read to settle. */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}

/* Mirror the link_config loopback mode into the BCM870x PCS BMCR
 * loopback bit.
 */
static int xcvr_10g_set_lb_bcm870x(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	int err;

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, MII_BMCR);
	if (err < 0)
		return err;

	err &= ~BMCR_LOOPBACK;

	if (lp->loopback_mode == LOOPBACK_MAC)
		err |= BMCR_LOOPBACK;

	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			 MII_BMCR, err);
	if (err)
		return err;

	return 0;
}

/* Bring up a (possibly hot-pluggable) BCM8706 10G transceiver; a
 * hot-plug port with no PHY present is silently skipped.
 */
static int xcvr_init_10g_bcm8706(struct niu *np)
{
	int err = 0;
	u64 val;

	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
		return err;

	/* Force the link LED on and enable indirect MIF mode. */
	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = bcm8706_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

/* Bring up a BCM8704 10G transceiver: reset, USER_DEV3 init,
 * loopback setup, diagnostics.
 */
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (err)
		return err;

	err = bcm8704_init_user_dev3(np);
	if (err)
		return err;

	err = xcvr_10g_set_lb_bcm870x(np);
	if (err)
		return err;

	err = xcvr_diag_bcm870x(np);
	if (err)
		return err;

	return 0;
}

/* Generic 10G transceiver init: force the link LED on, enable
 * indirect MIF mode, then dispatch on the probed PHY ID.
 */
static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}

/* Soft-reset the MII PHY and poll (1000 x 500us) for completion. */
static int mii_reset(struct niu *np)
{
	int limit, err;

	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
	if (err)
		return err;

	limit = 1000;
	while (--limit >= 0) {
		udelay(500);
		err = mii_read(np, np->phy_addr, MII_BMCR);
		if (err < 0)
			return err;
		if (!(err & BMCR_RESET))
			break;
	}
	if (limit < 0) {
		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
			   np->port, err);
		return -ENODEV;
	}

	return 0;
}

/* 1G RGMII transceiver init: direct MIF mode, PHY reset, then force
 * 1000/full with ADVERTISE_1000FULL set when the PHY supports it.
 */
static int xcvr_init_1g_rgmii(struct niu *np)
{
	int err;
	u64 val;
	u16 bmcr, bmsr, estat;

	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (bmsr & BMSR_ESTATEN) {
		u16 ctrl1000 = 0;

		if (estat & ESTATUS_1000_TFULL)
			ctrl1000 |=
				ADVERTISE_1000FULL;
		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
		if (err)
			return err;
	}

	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	/* NOTE(review): this second BMCR read overwrites bmcr but the
	 * value is never used afterwards; the BMSR read below is only
	 * checked for errors.
	 */
	bmcr = mii_read(np, np->phy_addr, MII_BMCR);

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;

	return 0;
}

/* Common MII init: reset the PHY, then either program autoneg
 * advertisement from lp->advertising or force speed/duplex,
 * honouring the MAC/PHY loopback modes in lp->loopback_mode.
 */
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		/* External loopback via the BCM5464R auxiliary control. */
		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}

/* 1G transceiver init: direct MIF mode, then common MII bring-up. */
static int xcvr_init_1g(struct niu *np)
{
	u64 val;

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val &= ~MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	return mii_init_common(np);
}

/* Invoke the per-PHY xcvr_init hook, if any. */
static int niu_xcvr_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->xcvr_init)
		err = ops->xcvr_init(np);

	return err;
}

/* Invoke the per-PHY serdes_init hook, if any. */
static int niu_serdes_init(struct niu *np)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->serdes_init)
		err = ops->serdes_init(np);

	return err;
}

static void niu_init_xif(struct niu *);
static void niu_handle_led(struct niu *, int status);

/* Propagate a link state change to the net device: log the event,
 * reprogram the XIF and LED under np->lock, and toggle the carrier.
 */
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}

/* Marvell 88X2011 10G link poll: link is reported up only when both
 * the PMA/PMD and PCS status say OK and all four XGXS lanes are
 * aligned.  The result is mirrored onto the activity LED.
 */
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}

/* BCM8706 10G link poll: signal detect, then PCS block lock, then
 * all XGXS lanes aligned.  0xffff from the first MDIO read is passed
 * through so hotplug callers can detect a missing PHY.
 */
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

/* BCM8704 10G link poll: same ladder as the 8706 variant but without
 * the PATTEST bit or the 0xffff hotplug special case.
 */
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}

/* Generic 10G link poll: dispatch on the probed PHY ID.  Only
 * meaningful when loopback is disabled; otherwise returns -EINVAL.
 */
static int link_status_10g(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = -EINVAL;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		int phy_id;

		phy_id = phy_decode(np->parent->port_phy, np->port);
		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

		/* handle different phy types */
		switch (phy_id & NIU_PHY_ID_MASK) {
		case NIU_PHY_ID_MRVL88X2011:
			err = link_status_10g_mrvl(np, link_up_p);
			break;

		default: /* bcom 8704 */
			err = link_status_10g_bcom(np, link_up_p);
			break;
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

/* Test the ESR interrupt-signal bits that indicate a 10G PHY is
 * physically present on this port.  Returns 1 when present.
 */
static int niu_10g_phy_present(struct niu *np)
{
	u64 sig, mask, val;

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return 0;
	}

	if ((sig & mask) != val)
		return 0;
	return 1;
}

/* 10G link poll for hot-pluggable PHYs: detect insertion/removal,
 * (re)initialize the transceiver on insertion, then query the link
 * via the BCM8706 poll.  Always returns 0.
 */
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

/* Invoke the per-PHY link_status hook, if any. */
static int niu_link_status(struct niu *np, int *link_up_p)
{
	const struct niu_phy_ops *ops = np->phy_ops;
	int err;

	err = 0;
	if (ops->link_status)
		err = ops->link_status(np, link_up_p);

	return err;
}

/* Periodic link poll: re-arm every 5s while carrier is up and every
 * 1s while it is down.
 */
static void niu_timer(unsigned long __opaque)
{
	struct niu *np = (struct niu *) __opaque;
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	if (netif_carrier_ok(np->dev))
		off = 5 * HZ;
	else
		off = 1 * HZ;
	np->timer.expires = jiffies + off;

	add_timer(&np->timer);
}

/* Per-PHY-type operation tables: each pairs the serdes/xcvr init
 * hooks with the matching link_status poll.
 */
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

/* Binds an ops table to the base MDIO address its PHYs sit at. */
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu_10g_fiber = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_niu_10g_serdes = {
	.ops		= &phy_ops_10g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_niu_1g_serdes = {
	.ops		= &phy_ops_1g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_niu_10g_hotplug = {
	.ops		= &phy_ops_niu_10g_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};

/* ATCA platforms: per-port MDIO address (ports 0/1 are SERDES). */
static int niu_atca_port_num[4] = {
	0, 0, 11, 10
};

/* Program the per-port 10G SERDES: control/test config, PLL at
 * FBDIV2, all four lanes; then verify the ready/detect signal bits
 * and, on failure, fall back to 1G SERDES mode.
 */
static int serdes_init_10g_serdes(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
	u64 ctrl_val, test_cfg_val, sig, mask, val;

	switch (np->port) {
	case 0:
		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
		pll_cfg = ENET_SERDES_0_PLL_CFG;
		break;

	case 1:
		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
		pll_cfg = ENET_SERDES_1_PLL_CFG;
		break;

	default:
		return -EINVAL;
	}
	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
		    ENET_SERDES_CTRL_SDET_1 |
		    ENET_SERDES_CTRL_SDET_2 |
		    ENET_SERDES_CTRL_SDET_3 |
		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));

	test_cfg_val = 0;

	if (lp->loopback_mode == LOOPBACK_PHY) {
		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_0_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_1_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_2_SHIFT) |
				 (ENET_TEST_MD_PAD_LOOPBACK <<
				  ENET_SERDES_TEST_MD_3_SHIFT));
	}

	esr_reset(np);
	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
	nw64(ctrl_reg, ctrl_val);
	nw64(test_cfg_reg, test_cfg_val);

	/* Initialize all 4 lanes of the SERDES.
	 */
	for (i = 0; i < 4; i++) {
		u32 rxtx_ctrl, glue0;
		int err;

		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
		if (err)
			return err;
		err = esr_read_glue0(np, i, &glue0);
		if (err)
			return err;

		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));

		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
			   ESR_GLUE_CTRL0_THCNT |
			   ESR_GLUE_CTRL0_BLTIME);
		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
			  (BLTIME_300_CYCLES << ESR_GLUE_CTRL0_BLTIME_SHIFT));

		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
		if (err)
			return err;
		err = esr_write_glue0(np, i, glue0);
		if (err)
			return err;
	}

	sig = nr64(ESR_INT_SIGNALS);
	switch (np->port) {
	case 0:
		mask = ESR_INT_SIGNALS_P0_BITS;
		val = (ESR_INT_SRDY0_P0 |
		       ESR_INT_DET0_P0 |
		       ESR_INT_XSRDY_P0 |
		       ESR_INT_XDP_P0_CH3 |
		       ESR_INT_XDP_P0_CH2 |
		       ESR_INT_XDP_P0_CH1 |
		       ESR_INT_XDP_P0_CH0);
		break;

	case 1:
		mask = ESR_INT_SIGNALS_P1_BITS;
		val = (ESR_INT_SRDY0_P1 |
		       ESR_INT_DET0_P1 |
		       ESR_INT_XSRDY_P1 |
		       ESR_INT_XDP_P1_CH3 |
		       ESR_INT_XDP_P1_CH2 |
		       ESR_INT_XDP_P1_CH1 |
		       ESR_INT_XDP_P1_CH0);
		break;

	default:
		return -EINVAL;
	}

	if ((sig & mask) != val) {
		int err;

		/* 10G failed: retry the port as a 1G SERDES link. */
		err = serdes_init_1g_serdes(np);
		if (!err) {
			np->flags &= ~NIU_FLAGS_10G;
			np->mac_xcvr = MAC_XCVR_PCS;
		} else {
			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
				   np->port);
			return -ENODEV;
		}
	}

	return 0;
}

/* Choose the phy_template (ops table + base MDIO address) for this
 * port from the platform type and the 10G/FIBER/SERDES flag
 * combination, and record np->phy_ops / np->phy_addr.
 */
static int niu_determine_phy_disposition(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	u8 plat_type = parent->plat_type;
	const struct niu_phy_template *tp;
	u32 phy_addr_off = 0;

	if (plat_type == PLAT_TYPE_NIU) {
		switch (np->flags &
			(NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) {
		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
			/* 10G Serdes */
			tp = &phy_template_niu_10g_serdes;
			break;
		case NIU_FLAGS_XCVR_SERDES:
			/* 1G Serdes */
			tp = &phy_template_niu_1g_serdes;
			break;
		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G Fiber */
		default:
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_niu_10g_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			} else {
				tp = &phy_template_niu_10g_fiber;
				phy_addr_off += np->port;
			}
			break;
		}
	} else {
		switch (np->flags &
			(NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) {
		case 0:
			/* 1G copper */
			tp = &phy_template_1g_copper;
			if (plat_type == PLAT_TYPE_VF_P0)
				phy_addr_off = 10;
			else if (plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 26;

			phy_addr_off += (np->port ^ 0x3);
			break;

		case NIU_FLAGS_10G:
			/* 10G copper */
			tp = &phy_template_10g_copper;
			break;

		case NIU_FLAGS_FIBER:
			/* 1G fiber */
			tp = &phy_template_1g_fiber;
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
			/* 10G fiber */
			tp = &phy_template_10g_fiber;
			if (plat_type == PLAT_TYPE_VF_P0 ||
			    plat_type == PLAT_TYPE_VF_P1)
				phy_addr_off = 8;
			phy_addr_off += np->port;
			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
				tp = &phy_template_10g_fiber_hotplug;
				if (np->port == 0)
					phy_addr_off = 8;
				if (np->port == 1)
					phy_addr_off = 12;
			}
			break;

		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		case NIU_FLAGS_XCVR_SERDES:
			switch(np->port) {
			case 0:
			case 1:
				tp = &phy_template_10g_serdes;
				break;
			case 2:
			case 3:
				tp = &phy_template_1g_rgmii;
				break;
			default:
				return -EINVAL;
				break;
			}
			phy_addr_off = niu_atca_port_num[np->port];
			break;

		default:
			return -EINVAL;
		}
	}

	np->phy_ops = tp->ops;
	np->phy_addr = tp->phy_addr_base + phy_addr_off;

	return 0;
}

/* Full link bring-up: xcvr then serdes then xcvr again (hot-plug
 * ports tolerate init failure), finishing with a link-status query.
 */
static int niu_init_link(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, ignore;

	if (parent->plat_type == PLAT_TYPE_NIU) {
		err = niu_xcvr_init(np);
		if (err)
			return err;
		msleep(200);
	}
	err = niu_serdes_init(np);
	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
		return err;
	msleep(200);
	err = niu_xcvr_init(np);
	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
		niu_link_status(np, &ignore);
	return 0;
}

/* Load the station MAC address into the three 16-bit ADDR registers
 * (reg2:reg1:reg0 covers addr[0..5]).
 */
static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}

/* Number of alternate MAC address slots for this MAC type. */
static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}

/* Program alternate MAC address slot @index; -EINVAL if out of range. */
static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}

/* Enable/disable compare on alternate MAC slot @index.  The BMAC
 * compare-enable bits are offset by one relative to the slot number.
 */
static int niu_enable_alt_mac(struct niu *np, int index, int on)
{
	unsigned long reg;
	u64 val, mask;

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_ADDR_CMPEN;
		mask = 1 << index;
	} else {
		reg = BMAC_ADDR_CMPEN;
		mask = 1 << (index + 1);
	}

	val = nr64_mac(reg);
	if (on)
		val |= mask;
	else
		val &= ~mask;
	nw64_mac(reg, val);

	return 0;
}

/* Write an RDC table number (and the MAC-preference bit) into one
 * HOST_INFO register.
 */
static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
				   int num, int mac_pref)
{
	u64 val = nr64_mac(reg);
	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
	val |= num;
	if (mac_pref)
		val |= HOST_INFO_MPR;
	nw64_mac(reg, val);
}

/* Select the XMAC or BMAC HOST_INFO slot and program the RDC table. */
static int __set_rdc_table_num(struct niu *np, int xmac_index, int bmac_index,
			       int rdc_table_num, int mac_pref)
{
	unsigned long reg;

	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
		return -EINVAL;
	if (np->flags & NIU_FLAGS_XMAC)
		reg = XMAC_HOST_INFO(xmac_index);
	else
		reg = BMAC_HOST_INFO(bmac_index);
	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
	return 0;
}

/* RDC table for the primary MAC: HOST_INFO slot 17 (XMAC) / 0 (BMAC). */
static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
					 int mac_pref)
{
	return
__set_rdc_table_num(np, 17, 0, table_num, mac_pref); }
/* Multicast traffic uses host-info slot 16 (XMAC) / 8 (BMAC). */
static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, int mac_pref) { return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); }
/* Alternate MAC @idx maps to host-info slot idx (XMAC) / idx + 1 (BMAC),
 * mirroring the bit offset used in niu_enable_alt_mac().
 */
static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, int table_num, int mac_pref) { if (idx >= niu_num_alt_addr(np)) return -EINVAL; return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); }
/* Recompute the two parity bits of a VLAN table entry: PARITY0 is odd
 * parity over bits 0-7 (ports 0/1), PARITY1 over bits 8-15 (ports 2/3),
 * each set when the hamming weight of its byte is odd.
 */
static u64 vlan_entry_set_parity(u64 reg_val) { u64 port01_mask; u64 port23_mask; port01_mask = 0x00ff; port23_mask = 0xff00; if (hweight64(reg_val & port01_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY0; else reg_val &= ~ENET_VLAN_TBL_PARITY0; if (hweight64(reg_val & port23_mask) & 1) reg_val |= ENET_VLAN_TBL_PARITY1; else reg_val &= ~ENET_VLAN_TBL_PARITY1; return reg_val; }
/* Read-modify-write one port's VPR flag and RDC table number inside
 * VLAN table entry @index, refreshing the parity bits before writeback.
 */
static void vlan_tbl_write(struct niu *np, unsigned long index, int port, int vpr, int rdc_table) { u64 reg_val = nr64(ENET_VLAN_TBL(index)); reg_val &= ~((ENET_VLAN_TBL_VPR | ENET_VLAN_TBL_VLANRDCTBLN) << ENET_VLAN_TBL_SHIFT(port)); if (vpr) reg_val |= (ENET_VLAN_TBL_VPR << ENET_VLAN_TBL_SHIFT(port)); reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); reg_val = vlan_entry_set_parity(reg_val); nw64(ENET_VLAN_TBL(index), reg_val); }
/* Zero every entry of the VLAN table. */
static void vlan_tbl_clear(struct niu *np) { int i; for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) nw64(ENET_VLAN_TBL(i), 0); }
/* Busy-poll TCAM_CTL (up to ~1ms, 1us steps) until @bit is set;
 * -ENODEV on timeout.
 */
static int tcam_wait_bit(struct niu *np, u64 bit) { int limit = 1000; while (--limit > 0) { if (nr64(TCAM_CTL) & bit) break; udelay(1); } if (limit <= 0) return -ENODEV; return 0; }
/* Invalidate TCAM entry @index by writing an all-zero key with an 0xff
 * mask, then waiting for the operation to complete.
 */
static int tcam_flush(struct niu *np, int index) { nw64(TCAM_KEY_0, 0x00); nw64(TCAM_KEY_MASK_0, 0xff); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); }
#if 0
/* Debug-only (compiled out): read back the 4-word key and mask of TCAM
 * entry @index.
 */
static int tcam_read(struct niu *np, int index, u64 *key, u64 *mask) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) { key[0] = nr64(TCAM_KEY_0); key[1] = nr64(TCAM_KEY_1); key[2] = nr64(TCAM_KEY_2); key[3] = nr64(TCAM_KEY_3); mask[0] = nr64(TCAM_KEY_MASK_0); mask[1] = nr64(TCAM_KEY_MASK_1); mask[2] = nr64(TCAM_KEY_MASK_2); mask[3] = nr64(TCAM_KEY_MASK_3); } return err; }
#endif
/* Write the 4-word key and mask into TCAM entry @index. */
static int tcam_write(struct niu *np, int index, u64 *key, u64 *mask) { nw64(TCAM_KEY_0, key[0]); nw64(TCAM_KEY_1, key[1]); nw64(TCAM_KEY_2, key[2]); nw64(TCAM_KEY_3, key[3]); nw64(TCAM_KEY_MASK_0, mask[0]); nw64(TCAM_KEY_MASK_1, mask[1]); nw64(TCAM_KEY_MASK_2, mask[2]); nw64(TCAM_KEY_MASK_3, mask[3]); nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); }
#if 0
/* Debug-only (compiled out): read the associated-RAM word for entry
 * @index; the result comes back through TCAM_KEY_1.
 */
static int tcam_assoc_read(struct niu *np, int index, u64 *data) { int err; nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); err = tcam_wait_bit(np, TCAM_CTL_STAT); if (!err) *data = nr64(TCAM_KEY_1); return err; }
#endif
/* Write the associated-RAM word for TCAM entry @index (staged through
 * TCAM_KEY_1).
 */
static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) { nw64(TCAM_KEY_1, assoc_data); nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); return tcam_wait_bit(np, TCAM_CTL_STAT); }
/* Enable/disable TCAM lookups; the hardware bit is a disable, hence the
 * inverted sense.
 */
static void tcam_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_TCAM_DIS; else val |= FFLP_CFG_1_TCAM_DIS; nw64(FFLP_CFG_1, val); }
/* Program TCAM access latency and ratio, then re-assert FFLPINITDONE
 * (it is cleared while the fields are being changed).
 */
static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) { u64 val = nr64(FFLP_CFG_1); val &= ~(FFLP_CFG_1_FFLPINITDONE | FFLP_CFG_1_CAMLAT | FFLP_CFG_1_CAMRATIO); val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); }
/* Set/clear the valid bit of one of the two user-programmable
 * ethertype (L2) classifier classes.
 */
static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); if (on) val |= L2_CLS_VLD; else val &= ~L2_CLS_VLD; nw64(reg, val); return 0; }
#if 0
/* Debug-only (compiled out): set the 16-bit ethertype matched by a
 * user L2 class.
 */
static int tcam_user_eth_class_set(struct niu *np, unsigned long class, u64
ether_type) { unsigned long reg; u64 val; if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2 || (ether_type & ~(u64)0xffff) != 0) return -EINVAL; reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); val = nr64(reg); val &= ~L2_CLS_ETYPE; val |= (ether_type << L2_CLS_ETYPE_SHIFT); nw64(reg, val); return 0; }
#endif
/* Set/clear the valid bit of one of the four user-programmable IP (L3)
 * classifier classes.
 */
static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, int on) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); if (on) val |= L3_CLS_VALID; else val &= ~L3_CLS_VALID; nw64(reg, val); return 0; }
/* Program a user IP class: IP version (v6 flag), protocol id and TOS
 * mask/value.  Each numeric field must fit in 8 bits.
 */
static int tcam_user_ip_class_set(struct niu *np, unsigned long class, int ipv6, u64 protocol_id, u64 tos_mask, u64 tos_val) { unsigned long reg; u64 val; if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4 || (protocol_id & ~(u64)0xff) != 0 || (tos_mask & ~(u64)0xff) != 0 || (tos_val & ~(u64)0xff) != 0) return -EINVAL; reg = L3_CLS(class - CLASS_CODE_USER_PROG1); val = nr64(reg); val &= ~(L3_CLS_IPVER | L3_CLS_PID | L3_CLS_TOSMASK | L3_CLS_TOS); if (ipv6) val |= L3_CLS_IPVER; val |= (protocol_id << L3_CLS_PID_SHIFT); val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); val |= (tos_val << L3_CLS_TOS_SHIFT); nw64(reg, val); return 0; }
/* Early TCAM bring-up: disable lookups, program default latency/ratio,
 * and disable every user L2 and L3 class.
 */
static int tcam_early_init(struct niu *np) { unsigned long i; int err; tcam_enable(np, 0); tcam_set_lat_and_ratio(np, DEFAULT_TCAM_LATENCY, DEFAULT_TCAM_ACCESS_RATIO); for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { err = tcam_user_eth_class_enable(np, i, 0); if (err) return err; } for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { err = tcam_user_ip_class_enable(np, i, 0); if (err) return err; } return 0; }
/* Invalidate every TCAM entry owned by this parent. */
static int tcam_flush_all(struct niu *np) { unsigned long i; for (i = 0; i < np->parent->tcam_num_entries; i++) { int err = tcam_flush(np, i); if (err) return err; } return 0; }
/* Build the HASH_TBL_ADDR register value for an FCRAM hash access.
 * NOTE(review): AUTOINC is set for single-entry accesses and clear for
 * multi-entry ones, which looks inverted — multi-entry loops below rely
 * on the address advancing between accesses; verify against the FCRAM
 * hash programming documentation.
 */
static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) { return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0); }
#if 0
/* Debug-only (compiled out): read @num_entries hash words from FCRAM
 * partition @partition starting at @index.
 */
static int hash_read(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + num_entries > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) data[i] = nr64(HASH_TBL_DATA(partition)); return 0; }
#endif
/* Write @num_entries hash words into FCRAM partition @partition.
 * NOTE(review): the bounds check uses index + num_entries * 8 while
 * hash_read() uses index + num_entries — the two disagree on the units
 * of FCRAM_SIZE; verify which is correct.
 */
static int hash_write(struct niu *np, unsigned long partition, unsigned long index, unsigned long num_entries, u64 *data) { u64 val = hash_addr_regval(index, num_entries); unsigned long i; if (partition >= FCRAM_NUM_PARTITIONS || index + (num_entries * 8) > FCRAM_SIZE) return -EINVAL; nw64(HASH_TBL_ADDR(partition), val); for (i = 0; i < num_entries; i++) nw64(HASH_TBL_DATA(partition), data[i]); return 0; }
/* Pulse the FFLP PIO/FIO reset bit, then restore normal FCRAM output
 * drive with FFLPINITDONE set.
 */
static void fflp_reset(struct niu *np) { u64 val; nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); udelay(10); nw64(FFLP_CFG_1, 0); val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); }
/* Program the FCRAM access ratio (FFLPINITDONE dropped while changing
 * it) and the FCRAM refresh min/max timers.
 */
static void fflp_set_timings(struct niu *np) { u64 val = nr64(FFLP_CFG_1); val &= ~FFLP_CFG_1_FFLPINITDONE; val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); nw64(FFLP_CFG_1, val); val = nr64(FFLP_CFG_1); val |= FFLP_CFG_1_FFLPINITDONE; nw64(FFLP_CFG_1, val); val = nr64(FCRAM_REF_TMR); val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); nw64(FCRAM_REF_TMR, val); }
/* Program one FCRAM flow-partition select register: 5-bit mask and
 * base, with an optional enable bit.
 */
static int fflp_set_partition(struct niu *np, u64 partition, u64 mask, u64 base, int enable) { unsigned long reg; u64 val; if (partition >= FCRAM_NUM_PARTITIONS || (mask & ~(u64)0x1f) != 0 || (base & ~(u64)0x1f) != 0) return -EINVAL; reg = FLW_PRT_SEL(partition); val = nr64(reg); val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); val |= (mask <<
FLW_PRT_SEL_MASK_SHIFT); val |= (base << FLW_PRT_SEL_BASE_SHIFT); if (enable) val |= FLW_PRT_SEL_EXT; nw64(reg, val); return 0; } static int fflp_disable_all_partitions(struct niu *np) { unsigned long i; for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { int err = fflp_set_partition(np, 0, 0, 0, 0); if (err) return err; } return 0; } static void fflp_llcsnap_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val |= FFLP_CFG_1_LLCSNAP; else val &= ~FFLP_CFG_1_LLCSNAP; nw64(FFLP_CFG_1, val); } static void fflp_errors_enable(struct niu *np, int on) { u64 val = nr64(FFLP_CFG_1); if (on) val &= ~FFLP_CFG_1_ERRORDIS; else val |= FFLP_CFG_1_ERRORDIS; nw64(FFLP_CFG_1, val); } static int fflp_hash_clear(struct niu *np) { struct fcram_hash_ipv4 ent; unsigned long i; /* IPV4 hash entry with valid bit clear, rest is don't care. */ memset(&ent, 0, sizeof(ent)); ent.header = HASH_HEADER_EXT; for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { int err = hash_write(np, 0, i, 1, (u64 *) &ent); if (err) return err; } return 0; } static int fflp_early_init(struct niu *np) { struct niu_parent *parent; unsigned long flags; int err; niu_lock_parent(np, flags); parent = np->parent; err = 0; if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { if (np->parent->plat_type != PLAT_TYPE_NIU) { fflp_reset(np); fflp_set_timings(np); err = fflp_disable_all_partitions(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_disable_all_partitions failed, err=%d\n", err); goto out; } } err = tcam_early_init(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_early_init failed, err=%d\n", err); goto out; } fflp_llcsnap_enable(np, 1); fflp_errors_enable(np, 0); nw64(H1POLY, 0); nw64(H2POLY, 0); err = tcam_flush_all(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "tcam_flush_all failed, err=%d\n", err); goto out; } if (np->parent->plat_type != PLAT_TYPE_NIU) { err = fflp_hash_clear(np); if (err) { netif_printk(np, probe, KERN_DEBUG, np->dev, "fflp_hash_clear 
failed, err=%d\n", err); goto out; } } vlan_tbl_clear(np); parent->flags |= PARENT_FLGS_CLS_HWINIT; } out: niu_unlock_parent(np, flags); return err; }
/* Program the flow-key configuration for @class_code
 * (USER_PROG1..SCTP_IPV6 range only).
 */
static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; }
/* Program the TCAM-key configuration for @class_code (same range). */
static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) { if (class_code < CLASS_CODE_USER_PROG1 || class_code > CLASS_CODE_SCTP_IPV6) return -EINVAL; nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); return 0; }
/* Entries for the ports are interleaved in the TCAM */
static u16 tcam_get_index(struct niu *np, u16 idx) { /* One entry reserved for IP fragment rule */ if (idx >= (np->clas.tcam_sz - 1)) idx = 0; return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); }
/* Usable TCAM entries for this port. */
static u16 tcam_get_size(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_sz - 1; }
/* Currently valid user TCAM entries. */
static u16 tcam_get_valid_entry_cnt(struct niu *np) { /* One entry reserved for IP fragment rule */ return np->clas.tcam_valid_entries - 1; }
/* Attach a page fragment to @skb and account len/data_len/truesize. */
static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, u32 offset, u32 size, u32 truesize) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); skb->len += size; skb->data_len += size; skb->truesize += truesize; }
/* Hash a DMA address (at page granularity) into the ring's rxhash
 * chain heads.
 */
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) { a >>= PAGE_SHIFT; a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); return a & (MAX_RBR_RING_SIZE - 1); }
/* Look up the page backing DMA address @addr; pages are chained through
 * page->mapping and keyed by page->index (the mapped DMA base).  @link
 * returns the chain slot so the caller can unlink.  BUG()s if absent.
 */
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, struct page ***link) { unsigned int h = niu_hash_rxaddr(rp, addr); struct page *p, **pp; addr &= PAGE_MASK; pp = &rp->rxhash[h]; for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { if (p->index == addr) { *link = pp; goto found; } } BUG(); found: return p; }
/* Insert @page (DMA base @base) at the head of its rxhash chain. */
static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) { unsigned int h = niu_hash_rxaddr(rp, base); page->index = base; page->mapping = (struct address_space *) rp->rxhash[h]; rp->rxhash[h] = page; }
/* Allocate and DMA-map one page, hash it, bump the page refcount by the
 * number of RBR blocks it backs, and fill the corresponding RBR slots
 * starting at @start_index.
 */
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, gfp_t mask, int start_index) { struct page *page; u64 addr; int i; page = alloc_page(mask); if (!page) return -ENOMEM; addr = np->ops->map_page(np->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (!addr) { __free_page(page); return -ENOMEM; } niu_hash_page(rp, page, addr); if (rp->rbr_blocks_per_page > 1) atomic_add(rp->rbr_blocks_per_page - 1, &compound_head(page)->_count); for (i = 0; i < rp->rbr_blocks_per_page; i++) { __le32 *rbr = &rp->rbr[start_index + i]; *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); addr += rp->rbr_block_size; } return 0; }
/* Refill one RBR slot's worth of accounting; a real page is allocated
 * only once a full page of slots is pending, and the hardware is kicked
 * once the pending count reaches the kick threshold.
 */
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int index = rp->rbr_index; rp->rbr_pending++; if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { int err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) { rp->rbr_pending--; return; } rp->rbr_index += rp->rbr_blocks_per_page; BUG_ON(rp->rbr_index > rp->rbr_table_size); if (rp->rbr_index == rp->rbr_table_size) rp->rbr_index = 0; if (rp->rbr_pending >= rp->rbr_kick_thresh) { nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); rp->rbr_pending = 0; } } }
/* Consume (and drop) one packet's worth of RCR entries, releasing any
 * fully-consumed backing pages.  Returns the number of entries eaten.
 */
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; int num_rcr = 0; rp->rx_dropped++; while (1) { struct page *page, **link; u64 addr, val; u32 rcr_size; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; if ((page->index + PAGE_SIZE) - rcr_size == addr) { *link = (struct page *) page->mapping; np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; __free_page(page);
rp->rbr_refill_pending++; } index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; return num_rcr; }
/* Assemble one received packet (possibly spanning several RCR entries
 * chained via RCR_ENTRY_MULTI) into an skb of page fragments, set the
 * checksum state and rxhash from the first entry / packet header, and
 * hand it to GRO.  Returns the number of RCR entries consumed; falls
 * back to niu_rx_pkt_ignore() if no skb can be allocated.
 */
static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp) { unsigned int index = rp->rcr_index; struct rx_pkt_hdr1 *rh; struct sk_buff *skb; int len, num_rcr; skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); if (unlikely(!skb)) return niu_rx_pkt_ignore(np, rp); num_rcr = 0; while (1) { struct page *page, **link; u32 rcr_size, append_size; u64 addr, val, off; num_rcr++; val = le64_to_cpup(&rp->rcr[index]); len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT; len -= ETH_FCS_LEN; addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT; page = niu_find_rxpage(rp, addr, &link); rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT]; off = addr & ~PAGE_MASK; append_size = rcr_size; if (num_rcr == 1) { int ptype; ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); if ((ptype == RCR_PKT_TYPE_TCP || ptype == RCR_PKT_TYPE_UDP) && !(val & (RCR_ENTRY_NOPORT | RCR_ENTRY_ERROR))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); } else if (!(val & RCR_ENTRY_MULTI)) append_size = len - skb->len; niu_rx_skb_append(skb, page, off, append_size, rcr_size); if ((page->index + rp->rbr_block_size) - rcr_size == addr) { *link = (struct page *) page->mapping; np->ops->unmap_page(np->device, page->index, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; rp->rbr_refill_pending++; } else get_page(page); index = NEXT_RCR(rp, index); if (!(val & RCR_ENTRY_MULTI)) break; } rp->rcr_index = index; len += sizeof(*rh); len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN); __pskb_pull_tail(skb, len); rh = (struct rx_pkt_hdr1 *) skb->data; if (np->dev->features & NETIF_F_RXHASH) skb->rxhash = ((u32)rh->hashval2_0 << 24 | (u32)rh->hashval2_1 << 16 | (u32)rh->hashval1_1 << 8 | (u32)rh->hashval1_2 << 0); skb_pull(skb, sizeof(*rh)); rp->rx_packets++; rp->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, np->dev); skb_record_rx_queue(skb, rp->rx_channel); napi_gro_receive(napi, skb); return num_rcr; }
/* Fill the RBR with pages until fewer than one page's worth of slots
 * remain before the ring end.
 */
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) { int blocks_per_page = rp->rbr_blocks_per_page; int err, index = rp->rbr_index; err = 0; while (index < (rp->rbr_table_size - blocks_per_page)) { err = niu_rbr_add_page(np, rp, mask, index); if (unlikely(err)) break; index += blocks_per_page; } rp->rbr_index = index; return err; }
/* Unmap and free every page chained in rxhash, then zero the RBR. */
static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) { int i; for (i = 0; i < MAX_RBR_RING_SIZE; i++) { struct page *page; page = rp->rxhash[i]; while (page) { struct page *next = (struct page *) page->mapping; u64 base = page->index; np->ops->unmap_page(np->device, base, PAGE_SIZE, DMA_FROM_DEVICE); page->index = 0; page->mapping = NULL; __free_page(page); page = next; } } for (i = 0; i < rp->rbr_table_size; i++) rp->rbr[i] = cpu_to_le32(0); rp->rbr_index = 0; }
/* Release one transmitted skb starting at descriptor @idx: account
 * packets/bytes from the TX header, unmap the head and every fragment
 * descriptor, and free the skb.  Returns the next descriptor index.
 */
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) { struct tx_buff_info *tb = &rp->tx_buffs[idx]; struct sk_buff *skb = tb->skb; struct tx_pkt_hdr *tp; u64 tx_flags; int i, len; tp = (struct tx_pkt_hdr *) skb->data; tx_flags = le64_to_cpup(&tp->flags); rp->tx_packets++; rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - ((tx_flags & TXHDR_PAD) / 2)); len = skb_headlen(skb); np->ops->unmap_single(np->device, tb->mapping, len, DMA_TO_DEVICE); if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) rp->mark_pending--; tb->skb = NULL; do { idx = NEXT_TX(rp, idx); len -= MAX_TX_DESC_LEN; } while (len > 0); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { tb = &rp->tx_buffs[idx]; BUG_ON(tb->skb != NULL); np->ops->unmap_page(np->device, tb->mapping, skb_frag_size(&skb_shinfo(skb)->frags[i]), DMA_TO_DEVICE); idx = NEXT_TX(rp, idx); } dev_kfree_skb(skb); return idx; }
/* Wake the TX queue once at least a quarter of the ring is free. */
#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
/* Reclaim completed TX descriptors for one ring and wake its stopped
 * netdev queue when enough space has been freed.
 */
static void niu_tx_work(struct niu
*np, struct tx_ring_info *rp) { struct netdev_queue *txq; u16 pkt_cnt, tmp; int cons, index; u64 cs; index = (rp - np->tx_rings); txq = netdev_get_tx_queue(np->dev, index); cs = rp->tx_cs; if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) goto out; tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; /* delta since last reclaim, modulo the hardware counter width */ pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); rp->last_pkt_cnt = tmp; cons = rp->cons; netif_printk(np, tx_done, KERN_DEBUG, np->dev, "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); while (pkt_cnt--) cons = release_tx_packet(np, rp, cons); rp->cons = cons; smp_mb(); out: if (unlikely(netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { /* re-check under the tx lock to avoid racing a concurrent stop */ __netif_tx_lock(txq, smp_processor_id()); if (netif_tx_queue_stopped(txq) && (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); } }
/* Fold the 16-bit hardware RX discard counters into software stats. */
static inline void niu_sync_rx_discard_stats(struct niu *np, struct rx_ring_info *rp, const int limit) { /* This elaborate scheme is needed for reading the RX discard * counters, as they are only 16-bit and can overflow quickly, * and because the overflow indication bit is not usable as * the counter value does not wrap, but remains at max value * 0xFFFF. * * In theory and in practice counters can be lost in between * reading nr64() and clearing the counter nw64(). For this * reason, the number of counter clearings nw64() is * limited/reduced though the limit parameter. */ int rx_channel = rp->rx_channel; u32 misc, wred; /* RXMISC (Receive Miscellaneous Discard Count), covers the * following discard events: IPP (Input Port Process), * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive * Block Ring) prefetch buffer is empty.
*/ misc = nr64(RXMISC(rx_channel)); if (unlikely((misc & RXMISC_COUNT) > limit)) { nw64(RXMISC(rx_channel), 0); rp->rx_errors += misc & RXMISC_COUNT; if (unlikely(misc & RXMISC_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: MISC drop=%u over=%u\n", rx_channel, misc, misc-limit); } /* WRED (Weighted Random Early Discard) by hardware */ wred = nr64(RED_DIS_CNT(rx_channel)); if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) { nw64(RED_DIS_CNT(rx_channel), 0); rp->rx_dropped += wred & RED_DIS_CNT_COUNT; if (unlikely(wred & RED_DIS_CNT_OFLOW)) dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); netif_printk(np, rx_err, KERN_DEBUG, np->dev, "rx-%d: WRED drop=%u over=%u\n", rx_channel, wred, wred-limit); } }
/* Process up to @budget packets from one RX channel: drain the RCR,
 * refill the RBR when enough pages are pending, and acknowledge the
 * consumed packet/pointer counts in RX_DMA_CTL_STAT.
 */
static int niu_rx_work(struct napi_struct *napi, struct niu *np, struct rx_ring_info *rp, int budget) { int qlen, rcr_done = 0, work_done = 0; struct rxdma_mailbox *mbox = rp->mbox; u64 stat;
#if 1
stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
mbox->rx_dma_ctl_stat = 0; mbox->rcrstat_a = 0; netif_printk(np, rx_status, KERN_DEBUG, np->dev, "%s(chan[%d]), stat[%llx] qlen=%d\n", __func__, rp->rx_channel, (unsigned long long)stat, qlen); rcr_done = work_done = 0; qlen = min(qlen, budget); while (work_done < qlen) { rcr_done += niu_process_rx_pkt(napi, np, rp); work_done++; } if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { unsigned int i; for (i = 0; i < rp->rbr_refill_pending; i++) niu_rbr_refill(np, rp, GFP_ATOMIC); rp->rbr_refill_pending = 0; } stat = (RX_DMA_CTL_STAT_MEX | ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); /* Only sync discards stats when qlen indicate potential for
drops */ if (qlen > 10) niu_sync_rx_discard_stats(np, rp, 0x7FFF); return work_done; }
/* Service all TX and RX channels flagged in the LDG's v0 vector (TX in
 * the high 32 bits, RX in the low 32), unmasking each channel's LD_IM0
 * interrupt mailbox as it goes.  Returns RX packets processed.
 */
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) { u64 v0 = lp->v0; u32 tx_vec = (v0 >> 32); u32 rx_vec = (v0 & 0xffffffff); int i, work_done = 0; netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() v0[%016llx]\n", __func__, (unsigned long long)v0); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) niu_tx_work(np, rp); nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); } for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int this_work_done; this_work_done = niu_rx_work(&lp->napi, np, rp, budget); budget -= this_work_done; work_done += this_work_done; } nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); } return work_done; }
/* NAPI poll entry: run the core worker and rearm the LDG when the
 * budget was not exhausted.
 */
static int niu_poll(struct napi_struct *napi, int budget) { struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); struct niu *np = lp->np; int work_done; work_done = niu_poll_core(np, lp, budget); if (work_done < budget) { napi_complete(napi); niu_ldg_rearm(np, lp, 1); } return work_done; }
/* Decode the RX_DMA_CTL_STAT error bits of one RX channel into the
 * kernel log.
 */
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, u64 stat) { netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) pr_cont("RBR_TMOUT "); if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) pr_cont("RSP_CNT "); if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) pr_cont("BYTE_EN_BUS "); if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) pr_cont("RSP_DAT "); if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) pr_cont("RCR_ACK "); if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) pr_cont("RCR_SHA_PAR "); if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) pr_cont("RBR_PRE_PAR "); if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) pr_cont("CONFIG "); if (stat & RX_DMA_CTL_STAT_RCRINCON) pr_cont("RCRINCON "); if (stat & RX_DMA_CTL_STAT_RCRFULL) pr_cont("RCRFULL "); if (stat & RX_DMA_CTL_STAT_RBRFULL) pr_cont("RBRFULL "); if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) pr_cont("RBRLOGPAGE "); if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) pr_cont("CFIGLOGPAGE "); if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) pr_cont("DC_FIDO "); pr_cont(")\n"); }
/* Check one RX channel for errors: -EINVAL only for channel/port fatal
 * conditions; the write-1-to-clear error bits are always acknowledged.
 */
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) { u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); int err = 0; if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | RX_DMA_CTL_STAT_PORT_FATAL)) err = -EINVAL; if (err) { netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", rp->rx_channel, (unsigned long long) stat); niu_log_rxchan_errors(np, rp, stat); } nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); return err; }
/* Decode the TX_CS error bits of one TX channel into the kernel log. */
static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, u64 cs) { netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); if (cs & TX_CS_MBOX_ERR) pr_cont("MBOX "); if (cs & TX_CS_PKT_SIZE_ERR) pr_cont("PKT_SIZE "); if (cs & TX_CS_TX_RING_OFLOW) pr_cont("TX_RING_OFLOW "); if (cs & TX_CS_PREF_BUF_PAR_ERR) pr_cont("PREF_BUF_PAR "); if (cs & TX_CS_NACK_PREF) pr_cont("NACK_PREF "); if (cs & TX_CS_NACK_PKT_RD) pr_cont("NACK_PKT_RD "); if (cs & TX_CS_CONF_PART_ERR) pr_cont("CONF_PART "); if (cs & TX_CS_PKT_PRT_ERR) pr_cont("PKT_PTR "); pr_cont(")\n"); }
/* Report a TX channel error (control/status plus the two error-log
 * registers); always fatal for the channel, hence -ENODEV.
 */
static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) { u64 cs, logh, logl; cs = nr64(TX_CS(rp->tx_channel)); logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", rp->tx_channel, (unsigned long long)cs, (unsigned long long)logh, (unsigned long long)logl); niu_log_txchan_errors(np, rp, cs); return -ENODEV; }
/* Report a MIF (MDIO interface) interrupt, noting whether the XMAC saw
 * a PHY management interrupt.
 */
static int niu_mif_interrupt(struct niu *np) { u64 mif_status = nr64(MIF_STATUS); int phy_mdint = 0; if (np->flags & NIU_FLAGS_XMAC) { u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) phy_mdint = 1; } netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", (unsigned long
long)mif_status, phy_mdint); return -ENODEV; } static void niu_xmac_interrupt(struct niu *np) { struct niu_xmac_stats *mp = &np->mac_stats.xmac; u64 val; val = nr64_mac(XTXMAC_STATUS); if (val & XTXMAC_STATUS_FRAME_CNT_EXP) mp->tx_frames += TXMAC_FRM_CNT_COUNT; if (val & XTXMAC_STATUS_BYTE_CNT_EXP) mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) mp->tx_fifo_errors++; if (val & XTXMAC_STATUS_TXMAC_OFLOW) mp->tx_overflow_errors++; if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) mp->tx_max_pkt_size_errors++; if (val & XTXMAC_STATUS_TXMAC_UFLOW) mp->tx_underflow_errors++; val = nr64_mac(XRXMAC_STATUS); if (val & XRXMAC_STATUS_LCL_FLT_STATUS) mp->rx_local_faults++; if (val & XRXMAC_STATUS_RFLT_DET) mp->rx_remote_faults++; if (val & XRXMAC_STATUS_LFLT_CNT_EXP) mp->rx_link_faults += LINK_FAULT_CNT_COUNT; if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) mp->rx_frags += RXMAC_FRAG_CNT_COUNT; if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP) mp->rx_octets += RXMAC_BT_CNT_COUNT; if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) mp->rx_code_violations += 
RXMAC_CD_VIO_CNT_COUNT; if (val & XRXMAC_STATUS_LENERR_CNT_EXP) mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; if (val & XRXMAC_STATUS_RXUFLOW) mp->rx_underflows++; if (val & XRXMAC_STATUS_RXOFLOW) mp->rx_overflows++; val = nr64_mac(XMAC_FC_STAT); if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) mp->pause_off_state++; if (val & XMAC_FC_STAT_TX_MAC_PAUSE) mp->pause_on_state++; if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) mp->pause_received++; }
/* BMAC counterpart of niu_xmac_interrupt(): fold the BMAC's wrapping
 * hardware counters into software stats on their expiration bits.
 * NOTE(review): rx_crc_errors is bumped by BRXMAC_ALIGN_ERR_CNT_COUNT
 * and rx_len_errors by BRXMAC_CODE_VIOL_ERR_CNT_COUNT — the constants
 * do not match the counters' names; verify against the BMAC counter
 * widths before "fixing".
 */
static void niu_bmac_interrupt(struct niu *np) { struct niu_bmac_stats *mp = &np->mac_stats.bmac; u64 val; val = nr64_mac(BTXMAC_STATUS); if (val & BTXMAC_STATUS_UNDERRUN) mp->tx_underflow_errors++; if (val & BTXMAC_STATUS_MAX_PKT_ERR) mp->tx_max_pkt_size_errors++; if (val & BTXMAC_STATUS_BYTE_CNT_EXP) mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; if (val & BTXMAC_STATUS_FRAME_CNT_EXP) mp->tx_frames += BTXMAC_FRM_CNT_COUNT; val = nr64_mac(BRXMAC_STATUS); if (val & BRXMAC_STATUS_OVERFLOW) mp->rx_overflows++; if (val & BRXMAC_STATUS_FRAME_CNT_EXP) mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; if (val & BRXMAC_STATUS_CRC_ERR_EXP) mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; if (val & BRXMAC_STATUS_LEN_ERR_EXP) mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; val = nr64_mac(BMAC_CTRL_STATUS); if (val & BMAC_CTRL_STATUS_NOPAUSE) mp->pause_off_state++; if (val & BMAC_CTRL_STATUS_PAUSE) mp->pause_on_state++; if (val & BMAC_CTRL_STATUS_PAUSE_RECV) mp->pause_received++; }
/* Dispatch a MAC stats interrupt to the XMAC or BMAC handler. */
static int niu_mac_interrupt(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_xmac_interrupt(np); else niu_bmac_interrupt(np); return 0; }
/* Decode the SYS_ERR_STAT core-device error bits into the kernel log. */
static void niu_log_device_error(struct niu *np, u64 stat) { netdev_err(np->dev, "Core device errors ( "); if (stat & SYS_ERR_MASK_META2) pr_cont("META2 "); if (stat & SYS_ERR_MASK_META1) pr_cont("META1 "); if (stat & SYS_ERR_MASK_PEU) pr_cont("PEU "); if (stat & SYS_ERR_MASK_TXC) pr_cont("TXC "); if (stat & SYS_ERR_MASK_RDMC) pr_cont("RDMC "); if (stat & SYS_ERR_MASK_TDMC) pr_cont("TDMC "); if (stat & SYS_ERR_MASK_ZCP) pr_cont("ZCP "); if (stat & SYS_ERR_MASK_FFLP) pr_cont("FFLP "); if (stat & SYS_ERR_MASK_IPP) pr_cont("IPP "); if (stat & SYS_ERR_MASK_MAC) pr_cont("MAC "); if (stat & SYS_ERR_MASK_SMX) pr_cont("SMX "); pr_cont(")\n"); }
/* Report a fatal core-device error; always -ENODEV. */
static int niu_device_error(struct niu *np) { u64 stat = nr64(SYS_ERR_STAT); netdev_err(np->dev, "Core device error, stat[%llx]\n", (unsigned long long)stat); niu_log_device_error(np, stat); return -ENODEV; }
/* Slow-path half of an LDG interrupt: v1 low 32 bits flag RX channel
 * errors, v1 bits 32-62 TX channel errors, bit 63 (of v0/v1) the MIF,
 * and v2 MAC (0x01ef) / core-device (0x0210) events.  On any error all
 * interrupts are disabled; otherwise per-channel RX error state is
 * cleared (re-arming MEX when no fast-path work is pending in v0).
 */
static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { int i, err = 0; lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; if (v1 & 0x00000000ffffffffULL) { u32 rx_vec = (v1 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; if (rx_vec & (1 << rp->rx_channel)) { int r = niu_rx_error(np, rp); if (r) { err = r; } else { if (!v0) nw64(RX_DMA_CTL_STAT(rp->rx_channel), RX_DMA_CTL_STAT_MEX); } } } } if (v1 & 0x7fffffff00000000ULL) { u32 tx_vec = (v1 >> 32) & 0x7fffffff; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; if (tx_vec & (1 << rp->tx_channel)) { int r = niu_tx_error(np, rp); if (r) err = r; } } } if ((v0 | v1) & 0x8000000000000000ULL) { int r = niu_mif_interrupt(np); if (r) err = r; } if (v2) { if (v2 & 0x01ef) { int r = niu_mac_interrupt(np); if (r) err = r; } if (v2 & 0x0210) { int r = niu_device_error(np); if (r) err = r; } } if (err) niu_enable_interrupts(np, 0); return err; }
/* Acknowledge an RX channel's threshold/timeout interrupt via its
 * mailbox status and RX_DMA_CTL_STAT.
 */
static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, int ldn) { struct rxdma_mailbox *mbox = rp->mbox; u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); stat_write = (RX_DMA_CTL_STAT_RCRTHRES | RX_DMA_CTL_STAT_RCRTO); nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}
/* Latch the TX channel's control/status for later reclaim in
 * niu_tx_work().
 */
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, int ldn) { rp->tx_cs = nr64(TX_CS(rp->tx_channel)); netif_printk(np, intr, KERN_DEBUG, np->dev, "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); }
/* Fast-path half of an LDG interrupt: for every RX/TX channel mapped to
 * this LDG, mask its LD_IM0 mailbox and, if flagged in v0, acknowledge
 * its per-channel interrupt state before NAPI runs.
 */
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) { struct niu_parent *parent = np->parent; u32 rx_vec, tx_vec; int i; tx_vec = (v0 >> 32); rx_vec = (v0 & 0xffffffff); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; int ldn = LDN_RXDMA(rp->rx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (rx_vec & (1 << rp->rx_channel)) niu_rxchan_intr(np, rp, ldn); } for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; int ldn = LDN_TXDMA(rp->tx_channel); if (parent->ldg_map[ldn] != ldg) continue; nw64(LD_IM0(ldn), LD_IM0_MASK); if (tx_vec & (1 << rp->tx_channel)) niu_txchan_intr(np, rp, ldn); } }
/* Stash the interrupt vectors on the LDG and schedule its NAPI context
 * (if not already scheduled), running the fast-path acks first.
 */
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, u64 v0, u64 v1, u64 v2) { if (likely(napi_schedule_prep(&lp->napi))) { lp->v0 = v0; lp->v1 = v1; lp->v2 = v2; __niu_fastpath_interrupt(np, lp->ldg_num, v0); __napi_schedule(&lp->napi); } }
/* Top-level LDG interrupt handler: read the three status vectors,
 * return IRQ_NONE if all are clear, run the slow path for MIF/error
 * bits, then either schedule NAPI for fast-path work or rearm the LDG.
 */
static irqreturn_t niu_interrupt(int irq, void *dev_id) { struct niu_ldg *lp = dev_id; struct niu *np = lp->np; int ldg = lp->ldg_num; unsigned long flags; u64 v0, v1, v2; if (netif_msg_intr(np)) printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)", __func__, lp, ldg); spin_lock_irqsave(&np->lock, flags); v0 = nr64(LDSV0(ldg)); v1 = nr64(LDSV1(ldg)); v2 = nr64(LDSV2(ldg)); if (netif_msg_intr(np)) pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n", (unsigned long long) v0, (unsigned long long) v1, (unsigned long long) v2); if (unlikely(!v0 && !v1 && !v2)) { spin_unlock_irqrestore(&np->lock, flags); return IRQ_NONE; } if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { int err = niu_slowpath_interrupt(np, lp, v0, v1, v2); if (err) goto out; } if (likely(v0 & ~((u64)1 << LDN_MIF))) niu_schedule_napi(np, lp, v0, v1, v2); else niu_ldg_rearm(np, lp, 1); out: spin_unlock_irqrestore(&np->lock, flags); return IRQ_HANDLED; }
/* Free an RX ring's DMA-coherent mailbox, RCR and RBR (releasing all
 * hashed pages first) and its rxhash array.
 */
static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct rxdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->rcr) { np->ops->free_coherent(np->device, MAX_RCR_RING_SIZE * sizeof(__le64), rp->rcr, rp->rcr_dma); rp->rcr = NULL; rp->rcr_table_size = 0; rp->rcr_index = 0; } if (rp->rbr) { niu_rbr_free(np, rp); np->ops->free_coherent(np->device, MAX_RBR_RING_SIZE * sizeof(__le32), rp->rbr, rp->rbr_dma); rp->rbr = NULL; rp->rbr_table_size = 0; rp->rbr_index = 0; } kfree(rp->rxhash); rp->rxhash = NULL; }
/* Free a TX ring's DMA-coherent mailbox and descriptor table, releasing
 * any still-pending skbs first.
 */
static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) { if (rp->mbox) { np->ops->free_coherent(np->device, sizeof(struct txdma_mailbox), rp->mbox, rp->mbox_dma); rp->mbox = NULL; } if (rp->descr) { int i; for (i = 0; i < MAX_TX_RING_SIZE; i++) { if (rp->tx_buffs[i].skb) (void) release_tx_packet(np, rp, i); } np->ops->free_coherent(np->device, MAX_TX_RING_SIZE * sizeof(__le64), rp->descr, rp->descr_dma); rp->descr = NULL; rp->pending = 0; rp->prod = 0; rp->cons = 0; rp->wrap_bit = 0; } }
/* Tear down every RX and TX ring and free the ring arrays themselves. */
static void niu_free_channels(struct niu *np) { int i; if (np->rx_rings) { for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; niu_free_rx_ring_info(np, rp); } kfree(np->rx_rings); np->rx_rings = NULL; np->num_rx_rings = 0; } if (np->tx_rings) { for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; niu_free_tx_ring_info(np, rp); } kfree(np->tx_rings); np->tx_rings = NULL; np->num_tx_rings = 0; } }
/* Allocate one RX ring's rxhash array plus DMA-coherent mailbox, RCR
 * and RBR; each coherent region must be 64-byte aligned.
 * NOTE(review): error returns leave earlier allocations in place —
 * presumably the caller unwinds via niu_free_rx_ring_info(); confirm.
 */
static int niu_alloc_rx_ring_info(struct niu *np, struct rx_ring_info *rp) { BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), GFP_KERNEL); if (!rp->rxhash) return -ENOMEM; rp->mbox =
	    np->ops->alloc_coherent(np->device,
				    sizeof(struct rxdma_mailbox),
				    &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	/* The chip requires 64-byte alignment for these DMA areas. */
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
			   rp->mbox);
		return -EINVAL;
	}

	rp->rcr = np->ops->alloc_coherent(np->device,
					  MAX_RCR_RING_SIZE * sizeof(__le64),
					  &rp->rcr_dma, GFP_KERNEL);
	if (!rp->rcr)
		return -ENOMEM;
	if ((unsigned long)rp->rcr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
			   rp->rcr);
		return -EINVAL;
	}
	rp->rcr_table_size = MAX_RCR_RING_SIZE;
	rp->rcr_index = 0;

	rp->rbr = np->ops->alloc_coherent(np->device,
					  MAX_RBR_RING_SIZE * sizeof(__le32),
					  &rp->rbr_dma, GFP_KERNEL);
	if (!rp->rbr)
		return -ENOMEM;
	if ((unsigned long)rp->rbr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
			   rp->rbr);
		return -EINVAL;
	}
	rp->rbr_table_size = MAX_RBR_RING_SIZE;
	rp->rbr_index = 0;
	rp->rbr_pending = 0;

	return 0;
}

/* Derive the per-ring TX burst limit from the MTU, capped at 4096. */
static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}

/* Allocate one TX ring's mailbox and descriptor table, and set the
 * software ring state to empty.
 */
static int niu_alloc_tx_ring_info(struct niu *np,
				  struct tx_ring_info *rp)
{
	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);

	rp->mbox = np->ops->alloc_coherent(np->device,
					   sizeof(struct txdma_mailbox),
					   &rp->mbox_dma, GFP_KERNEL);
	if (!rp->mbox)
		return -ENOMEM;
	if ((unsigned long)rp->mbox & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
			   rp->mbox);
		return -EINVAL;
	}

	rp->descr = np->ops->alloc_coherent(np->device,
					    MAX_TX_RING_SIZE * sizeof(__le64),
					    &rp->descr_dma, GFP_KERNEL);
	if (!rp->descr)
		return -ENOMEM;
	if ((unsigned long)rp->descr & (64UL - 1)) {
		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
			   rp->descr);
		return -EINVAL;
	}

	rp->pending = MAX_TX_RING_SIZE;
	rp->prod = 0;
	rp->cons = 0;
	rp->wrap_bit = 0;

	/* XXX make these configurable... XXX */
	rp->mark_freq = rp->pending / 4;

	niu_set_max_burst(np, rp);

	return 0;
}

/* Pick the RBR block size and the four RX buffer sizes based on page
 * size and MTU.
 */
static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
{
	u16 bss;

	bss = min(PAGE_SHIFT, 15);

	rp->rbr_block_size = 1 << bss;
	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);

	rp->rbr_sizes[0] = 256;
	rp->rbr_sizes[1] = 1024;
	if (np->dev->mtu > ETH_DATA_LEN) {
		switch (PAGE_SIZE) {
		case 4 * 1024:
			rp->rbr_sizes[2] = 4096;
			break;

		default:
			rp->rbr_sizes[2] = 8192;
			break;
		}
	} else {
		rp->rbr_sizes[2] = 2048;
	}
	rp->rbr_sizes[3] = rp->rbr_block_size;
}

/* Allocate and initialize all RX and TX rings for this port. */
static int niu_alloc_channels(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int first_rx_channel, first_tx_channel;
	int num_rx_rings, num_tx_rings;
	struct rx_ring_info *rx_rings;
	struct tx_ring_info *tx_rings;
	int i, port, err;

	port = np->port;
	/* Channels are assigned to ports in order; skip over the
	 * channels owned by lower-numbered ports.
	 */
	first_rx_channel = first_tx_channel = 0;
	for (i = 0; i < port; i++) {
		first_rx_channel += parent->rxchan_per_port[i];
		first_tx_channel += parent->txchan_per_port[i];
	}

	num_rx_rings = parent->rxchan_per_port[port];
	num_tx_rings = parent->txchan_per_port[port];

	rx_rings = kcalloc(num_rx_rings, sizeof(struct
rx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!rx_rings) goto out_err; np->num_rx_rings = num_rx_rings; smp_wmb(); np->rx_rings = rx_rings; netif_set_real_num_rx_queues(np->dev, num_rx_rings); for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &np->rx_rings[i]; rp->np = np; rp->rx_channel = first_rx_channel + i; err = niu_alloc_rx_ring_info(np, rp); if (err) goto out_err; niu_size_rbr(np, rp); /* XXX better defaults, configurable, etc... XXX */ rp->nonsyn_window = 64; rp->nonsyn_threshold = rp->rcr_table_size - 64; rp->syn_window = 64; rp->syn_threshold = rp->rcr_table_size - 64; rp->rcr_pkt_threshold = 16; rp->rcr_timeout = 8; rp->rbr_kick_thresh = RBR_REFILL_MIN; if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) rp->rbr_kick_thresh = rp->rbr_blocks_per_page; err = niu_rbr_fill(np, rp, GFP_KERNEL); if (err) return err; } tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), GFP_KERNEL); err = -ENOMEM; if (!tx_rings) goto out_err; np->num_tx_rings = num_tx_rings; smp_wmb(); np->tx_rings = tx_rings; netif_set_real_num_tx_queues(np->dev, num_tx_rings); for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &np->tx_rings[i]; rp->np = np; rp->tx_channel = first_tx_channel + i; err = niu_alloc_tx_ring_info(np, rp); if (err) goto out_err; } return 0; out_err: niu_free_channels(np); return err; } static int niu_tx_cs_sng_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (val & TX_CS_SNG_STATE) return 0; } return -ENODEV; } static int niu_tx_channel_stop(struct niu *np, int channel) { u64 val = nr64(TX_CS(channel)); val |= TX_CS_STOP_N_GO; nw64(TX_CS(channel), val); return niu_tx_cs_sng_poll(np, channel); } static int niu_tx_cs_reset_poll(struct niu *np, int channel) { int limit = 1000; while (--limit > 0) { u64 val = nr64(TX_CS(channel)); if (!(val & TX_CS_RST)) return 0; } return -ENODEV; } static int niu_tx_channel_reset(struct niu *np, int channel) { u64 val = 
nr64(TX_CS(channel)); int err; val |= TX_CS_RST; nw64(TX_CS(channel), val); err = niu_tx_cs_reset_poll(np, channel); if (!err) nw64(TX_RING_KICK(channel), 0); return err; } static int niu_tx_channel_lpage_init(struct niu *np, int channel) { u64 val; nw64(TX_LOG_MASK1(channel), 0); nw64(TX_LOG_VAL1(channel), 0); nw64(TX_LOG_MASK2(channel), 0); nw64(TX_LOG_VAL2(channel), 0); nw64(TX_LOG_PAGE_RELO1(channel), 0); nw64(TX_LOG_PAGE_RELO2(channel), 0); nw64(TX_LOG_PAGE_HDL(channel), 0); val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); nw64(TX_LOG_PAGE_VLD(channel), val); /* XXX TXDMA 32bit mode? XXX */ return 0; } static void niu_txc_enable_port(struct niu *np, int on) { unsigned long flags; u64 val, mask; niu_lock_parent(np, flags); val = nr64(TXC_CONTROL); mask = (u64)1 << np->port; if (on) { val |= TXC_CONTROL_ENABLE | mask; } else { val &= ~mask; if ((val & ~TXC_CONTROL_ENABLE) == 0) val &= ~TXC_CONTROL_ENABLE; } nw64(TXC_CONTROL, val); niu_unlock_parent(np, flags); } static void niu_txc_set_imask(struct niu *np, u64 imask) { unsigned long flags; u64 val; niu_lock_parent(np, flags); val = nr64(TXC_INT_MASK); val &= ~TXC_INT_MASK_VAL(np->port); val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); niu_unlock_parent(np, flags); } static void niu_txc_port_dma_enable(struct niu *np, int on) { u64 val = 0; if (on) { int i; for (i = 0; i < np->num_tx_rings; i++) val |= (1 << np->tx_rings[i].tx_channel); } nw64(TXC_PORT_DMA(np->port), val); } static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) { int err, channel = rp->tx_channel; u64 val, ring_len; err = niu_tx_channel_stop(np, channel); if (err) return err; err = niu_tx_channel_reset(np, channel); if (err) return err; err = niu_tx_channel_lpage_init(np, channel); if (err) return err; nw64(TXC_DMA_MAX(channel), rp->max_burst); nw64(TX_ENT_MSK(channel), 0); if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | TX_RNG_CFIG_STADDR)) { netdev_err(np->dev, 
"TX ring channel %d DMA addr (%llx) is not aligned\n",
			   channel, (unsigned long long)rp->descr_dma);
		return -EINVAL;
	}

	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);

	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
	       rp->descr_dma);
	nw64(TX_RNG_CFIG(channel), val);

	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
			   channel, (unsigned long long)rp->mbox_dma);
		return -EINVAL;
	}
	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);

	nw64(TX_CS(channel), 0);

	rp->last_pkt_cnt = 0;

	return 0;
}

/* Program this port's RDC group tables and its default RDC. */
static void niu_init_rdc_groups(struct niu *np)
{
	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
	int i, first_table_num = tp->first_table_num;

	for (i = 0; i < tp->num_tables; i++) {
		struct rdc_table *tbl = &tp->tables[i];
		int this_table = first_table_num + i;
		int slot;

		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
			nw64(RDC_TBL(this_table, slot),
			     tbl->rxdma_channel[slot]);
	}

	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
}

/* Set this port's DRR weight according to its PHY link type. */
static void niu_init_drr_weight(struct niu *np)
{
	int type = phy_decode(np->parent->port_phy, np->port);
	u64 val;

	switch (type) {
	case PORT_TYPE_10G:
		val = PT_DRR_WEIGHT_DEFAULT_10G;
		break;

	case PORT_TYPE_1G:
	default:
		val = PT_DRR_WEIGHT_DEFAULT_1G;
		break;
	}
	nw64(PT_DRR_WT(np->port), val);
}

/* Point the primary, multicast and all alternate MAC addresses at this
 * port's first RDC table.
 */
static int niu_init_hostinfo(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int i, err, num_alt = niu_num_alt_addr(np);
	int first_rdc_table = tp->first_table_num;

	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
	if (err)
		return err;

	for (i = 0; i < num_alt; i++) {
		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
		if (err)
			return err;
	}

	return 0;
}

static int niu_rx_channel_reset(struct niu *np, int channel)
{
	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
				      RXDMA_CFIG1_RST, 1000, 10,
				      "RXDMA_CFIG1");
}

/* Clear the RX channel's logical page registers and mark both pages
 * valid for this port's function.
 */
static int niu_rx_channel_lpage_init(struct niu *np, int channel)
{
	u64 val;

	nw64(RX_LOG_MASK1(channel), 0);
	nw64(RX_LOG_VAL1(channel), 0);
	nw64(RX_LOG_MASK2(channel), 0);
	nw64(RX_LOG_VAL2(channel), 0);
	nw64(RX_LOG_PAGE_RELO1(channel), 0);
	nw64(RX_LOG_PAGE_RELO2(channel), 0);
	nw64(RX_LOG_PAGE_HDL(channel), 0);

	val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
	nw64(RX_LOG_PAGE_VLD(channel), val);

	return 0;
}

/* Program the RED (random early discard) window/threshold parameters
 * for one RX channel.
 */
static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
{
	u64 val;

	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
	nw64(RDC_RED_PARA(rp->rx_channel), val);
}

/* Encode the RBR block size and the four buffer sizes into the value
 * for RBR_CFIG_B; -EINVAL if any configured size is unsupported.
 */
static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
{
	u64 val = 0;

	*ret = 0;
	switch (rp->rbr_block_size) {
	case 4 * 1024:
		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	case 32 * 1024:
		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD2;
	switch (rp->rbr_sizes[2]) {
	case 2 * 1024:
		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	case 16 * 1024:
		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD1;
	switch (rp->rbr_sizes[1]) {
	case 1 * 1024:
		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 4 * 1024:
		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	case 8 * 1024:
		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
		break;
	default:
		return -EINVAL;
	}
	val |= RBR_CFIG_B_VLD0;
	switch (rp->rbr_sizes[0]) {
	case 256:
		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 512:
		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 1 * 1024:
		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	case 2 * 1024:
		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	*ret = val;
	return 0;
}

/* Flip the RX channel enable bit and wait for the channel to report
 * quiescent state (QST).
 */
static int niu_enable_rx_channel(struct niu *np, int channel, int on)
{
	u64 val = nr64(RXDMA_CFIG1(channel));
	int limit;

	if (on)
		val |= RXDMA_CFIG1_EN;
	else
		val &= ~RXDMA_CFIG1_EN;
	nw64(RXDMA_CFIG1(channel), val);

	limit = 1000;
	while (--limit > 0) {
		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
			break;
		udelay(10);
	}
	if (limit <= 0)
		return -ENODEV;
	return 0;
}

/* Bring one RX channel up: reset, logical pages, WRED, status/mask,
 * mailbox, RBR and RCR configuration, then enable and prime the RBR.
 */
static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int err, channel = rp->rx_channel;
	u64 val;

	err = niu_rx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_rx_channel_lpage_init(np, channel);
	if (err)
		return err;

	niu_rx_channel_wred_init(np, rp);

	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
	nw64(RX_DMA_CTL_STAT(channel),
	     (RX_DMA_CTL_STAT_MEX |
	      RX_DMA_CTL_STAT_RCRTHRES |
	      RX_DMA_CTL_STAT_RCRTO |
	      RX_DMA_CTL_STAT_RBR_EMPTY));
	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
	nw64(RXDMA_CFIG2(channel),
	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
	      RXDMA_CFIG2_FULL_HDR));
	nw64(RBR_CFIG_A(channel),
	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
	err = niu_compute_rbr_cfig_b(rp, &val);
	if (err)
		return err;
	nw64(RBR_CFIG_B(channel), val);
	nw64(RCRCFIG_A(channel),
	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
	nw64(RCRCFIG_B(channel),
	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
	     RCRCFIG_B_ENTOUT |
	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));

	err = niu_enable_rx_channel(np, channel, 1);
	if (err)
		return err;

	nw64(RBR_KICK(channel), rp->rbr_index);

	val = nr64(RX_DMA_CTL_STAT(channel));
	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
	nw64(RX_DMA_CTL_STAT(channel), val);

	return 0;
}

/* Global RX-side bring-up: clock divider, RED seed, RDC groups, DRR
 * weights, hostinfo, then each RX channel in turn.
 */
static int niu_init_rx_channels(struct niu *np)
{
	unsigned long flags;
	u64 seed = jiffies_64;
	int err, i;

	niu_lock_parent(np, flags);
	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
	niu_unlock_parent(np, flags);

	/* XXX RXDMA 32bit mode? XXX */

	niu_init_rdc_groups(np);
	niu_init_drr_weight(np);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		err = niu_init_one_rx_channel(np, rp);
		if (err)
			return err;
	}

	return 0;
}

/* Install a TCAM rule that matches fragmented IP packets (no ports). */
static int niu_set_ip_frag_rule(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	struct niu_tcam_entry *tp;
	int index, err;

	index = cp->tcam_top;
	tp = &parent->tcam[index];

	/* Note that the noport bit is the same in both ipv4 and
	 * ipv6 format TCAM entries.
 */
	memset(tp, 0, sizeof(*tp));
	tp->key[1] = TCAM_V4KEY1_NOPORT;
	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
	err = tcam_write(np, index, tp->key, tp->key_mask);
	if (err)
		return err;
	err = tcam_assoc_write(np, index, tp->assoc_data);
	if (err)
		return err;
	tp->valid = 1;
	cp->tcam_valid_entries++;

	return 0;
}

/* Program the classifier: hash polynomials, hostinfo, VLAN and
 * alternate-MAC RDC mappings, TCAM/flow keys, IP-fragment rule, then
 * enable the TCAM.
 */
static int niu_init_classifier_hw(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_classifier *cp = &np->clas;
	int i, err;

	nw64(H1POLY, cp->h1_init);
	nw64(H2POLY, cp->h2_init);

	err = niu_init_hostinfo(np);
	if (err)
		return err;

	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];

		vlan_tbl_write(np, i, np->port,
			       vp->vlan_pref, vp->rdc_num);
	}

	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];

		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
						ap->rdc_num, ap->mac_pref);
		if (err)
			return err;
	}

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
		if (err)
			return err;
		err = niu_set_flow_key(np, i, parent->flow_key[index]);
		if (err)
			return err;
	}

	err = niu_set_ip_frag_rule(np);
	if (err)
		return err;

	tcam_enable(np, 1);

	return 0;
}

/* Write one 5-word entry into the ZCP CFIFO RAM and wait for the
 * access to complete.
 */
static int niu_zcp_write(struct niu *np, int index, u64 *data)
{
	nw64(ZCP_RAM_DATA0, data[0]);
	nw64(ZCP_RAM_DATA1, data[1]);
	nw64(ZCP_RAM_DATA2, data[2]);
	nw64(ZCP_RAM_DATA3, data[3]);
	nw64(ZCP_RAM_DATA4, data[4]);
	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_WRITE |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	return niu_wait_bits_clear(np, ZCP_RAM_ACC,
				   ZCP_RAM_ACC_BUSY, 1000, 100);
}

/* Read one 5-word entry from the ZCP CFIFO RAM, waiting for BUSY to
 * clear both before and after issuing the read.
 */
static int niu_zcp_read(struct niu *np, int index, u64 *data)
{
	int err;

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC,
				  ZCP_RAM_ACC_BUSY, 1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	nw64(ZCP_RAM_ACC,
	     (ZCP_RAM_ACC_READ |
	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));

	err = niu_wait_bits_clear(np, ZCP_RAM_ACC,
				  ZCP_RAM_ACC_BUSY, 1000, 100);
	if (err) {
		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
			   (unsigned long long)nr64(ZCP_RAM_ACC));
		return err;
	}

	data[0] = nr64(ZCP_RAM_DATA0);
	data[1] = nr64(ZCP_RAM_DATA1);
	data[2] = nr64(ZCP_RAM_DATA2);
	data[3] = nr64(ZCP_RAM_DATA3);
	data[4] = nr64(ZCP_RAM_DATA4);

	return 0;
}

/* Pulse this port's CFIFO reset bit. */
static void niu_zcp_cfifo_reset(struct niu *np)
{
	u64 val = nr64(RESET_CFIFO);

	val |= RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
	udelay(10);

	val &= ~RESET_CFIFO_RST(np->port);
	nw64(RESET_CFIFO, val);
}

/* Zero the ZCP CFIFO RAM (size depends on platform/port), reset the
 * CFIFO, clear ECC/interrupt state and mask all ZCP interrupts.
 */
static int niu_init_zcp(struct niu *np)
{
	u64 data[5], rbuf[5];
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_CFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_CFIFO_ENTRIES;
	} else
		max = NIU_CFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		err = niu_zcp_write(np, i, data);
		if (err)
			return err;
		err = niu_zcp_read(np, i, rbuf);
		if (err)
			return err;
	}

	niu_zcp_cfifo_reset(np);
	nw64(CFIFO_ECC(np->port), 0);
	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
	(void) nr64(ZCP_INT_STAT);
	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);

	return 0;
}

/* Write one 5-word entry to the IPP DFIFO using PIO mode. */
static void niu_ipp_write(struct niu *np, int index, u64 *data)
{
	u64 val = nr64_ipp(IPP_CFIG);

	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
	nw64_ipp(IPP_DFIFO_WR_PTR, index);
	nw64_ipp(IPP_DFIFO_WR0, data[0]);
	nw64_ipp(IPP_DFIFO_WR1, data[1]);
	nw64_ipp(IPP_DFIFO_WR2, data[2]);
	nw64_ipp(IPP_DFIFO_WR3, data[3]);
	nw64_ipp(IPP_DFIFO_WR4, data[4]);
	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
}

/* Read one 5-word entry from the IPP DFIFO. */
static void niu_ipp_read(struct niu *np, int index, u64 *data)
{
	nw64_ipp(IPP_DFIFO_RD_PTR, index);
	data[0] = nr64_ipp(IPP_DFIFO_RD0);
	data[1] = nr64_ipp(IPP_DFIFO_RD1);
	data[2] = nr64_ipp(IPP_DFIFO_RD2);
	data[3] = nr64_ipp(IPP_DFIFO_RD3);
	data[4] = nr64_ipp(IPP_DFIFO_RD4);
}

static int niu_ipp_reset(struct niu *np)
{
	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
					  1000, 100, "IPP_CFIG");
}

/* Zero the IPP DFIFO, reset the IPP, clear its counters and enable it
 * with checksum/CRC checking and the maximum packet size.
 */
static int niu_init_ipp(struct niu *np)
{
	u64 data[5], rbuf[5], val;
	int i, max, err;

	if (np->parent->plat_type != PLAT_TYPE_NIU) {
		if (np->port == 0 || np->port == 1)
			max = ATLAS_P0_P1_DFIFO_ENTRIES;
		else
			max = ATLAS_P2_P3_DFIFO_ENTRIES;
	} else
		max = NIU_DFIFO_ENTRIES;

	data[0] = 0;
	data[1] = 0;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;

	for (i = 0; i < max; i++) {
		niu_ipp_write(np, i, data);
		niu_ipp_read(np, i, rbuf);
	}

	(void) nr64_ipp(IPP_INT_STAT);
	(void) nr64_ipp(IPP_INT_STAT);

	err = niu_ipp_reset(np);
	if (err)
		return err;

	/* Reading these counters clears them. */
	(void) nr64_ipp(IPP_PKT_DIS);
	(void) nr64_ipp(IPP_BAD_CS_CNT);
	(void) nr64_ipp(IPP_ECC);

	(void) nr64_ipp(IPP_INT_STAT);

	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);

	val = nr64_ipp(IPP_CFIG);
	val &= ~IPP_CFIG_IP_MAX_PKT;
	val |= (IPP_CFIG_IPP_ENABLE |
		IPP_CFIG_DFIFO_ECC_EN |
		IPP_CFIG_DROP_BAD_CRC |
		IPP_CFIG_CKSUM_EN |
		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
	nw64_ipp(IPP_CFIG, val);

	return 0;
}

/* Drive the link LED; only 10G fiber ports have a controllable LED. */
static void niu_handle_led(struct niu *np, int status)
{
	u64 val;

	val = nr64_mac(XMAC_CONFIG);

	if ((np->flags & NIU_FLAGS_10G) != 0 &&
	    (np->flags & NIU_FLAGS_FIBER) != 0) {
		if (status) {
			val |= XMAC_CONFIG_LED_POLARITY;
			val &= ~XMAC_CONFIG_FORCE_LED_ON;
		} else {
			val |= XMAC_CONFIG_FORCE_LED_ON;
			val &= ~XMAC_CONFIG_LED_POLARITY;
		}
	}

	nw64_mac(XMAC_CONFIG, val);
}

/* Program the XMAC's XIF (MAC-to-PHY interface) configuration based
 * on link mode, speed and loopback settings.
 */
static void niu_init_xif_xmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
		val = nr64(MIF_CONFIG);
		val |= MIF_CONFIG_ATCA_GE;
		nw64(MIF_CONFIG, val);
	}

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;

	val |= XMAC_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
		val |= XMAC_CONFIG_LOOPBACK;
	} else {
		val &=
~XMAC_CONFIG_LOOPBACK;
	}

	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_CONFIG_LFS_DISABLE;
	} else {
		val |= XMAC_CONFIG_LFS_DISABLE;
		if (!(np->flags & NIU_FLAGS_FIBER) &&
		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
			val |= XMAC_CONFIG_1G_PCS_BYPASS;
		else
			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
	}

	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;

	if (lp->active_speed == SPEED_100)
		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
	else
		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;

	nw64_mac(XMAC_CONFIG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_MODE_MASK;
	if (np->flags & NIU_FLAGS_10G) {
		val |= XMAC_CONFIG_MODE_XGMII;
	} else {
		if (lp->active_speed == SPEED_1000)
			val |= XMAC_CONFIG_MODE_GMII;
		else
			val |= XMAC_CONFIG_MODE_MII;
	}

	nw64_mac(XMAC_CONFIG, val);
}

/* Program the BMAC's XIF configuration (loopback, GMII mode, clock). */
static void niu_init_xif_bmac(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;

	if (lp->loopback_mode == LOOPBACK_MAC)
		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
	else
		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;

	if (lp->active_speed == SPEED_1000)
		val |= BMAC_XIF_CONFIG_GMII_MODE;
	else
		val &= ~BMAC_XIF_CONFIG_GMII_MODE;

	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
		 BMAC_XIF_CONFIG_LED_POLARITY);

	if (!(np->flags & NIU_FLAGS_10G) &&
	    !(np->flags & NIU_FLAGS_FIBER) &&
	    lp->active_speed == SPEED_100)
		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
	else
		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;

	nw64_mac(BMAC_XIF_CONFIG, val);
}

static void niu_init_xif(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_xif_xmac(np);
	else
		niu_init_xif_bmac(np);
}

/* Reset the 1G PCS and wait for the self-clearing reset bit to drop. */
static void niu_pcs_mii_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_pcs(PCS_MII_CTL);

	val |= PCS_MII_CTL_RST;
	nw64_pcs(PCS_MII_CTL, val);

	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
		udelay(100);
		val = nr64_pcs(PCS_MII_CTL);
	}
}

/* Reset the 10G XPCS and wait for the self-clearing reset bit to drop. */
static void niu_xpcs_reset(struct niu *np)
{
	int limit = 1000;
	u64 val = nr64_xpcs(XPCS_CONTROL1);

	val |= XPCS_CONTROL1_RESET;
	nw64_xpcs(XPCS_CONTROL1, val);

	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
		udelay(100);
		val = nr64_xpcs(XPCS_CONTROL1);
	}
}

/* Initialize the PCS/XPCS layer according to the port's link type
 * (1G fiber/copper, 1G SERDES, 1G RGMII fiber, or 10G).
 */
static int niu_init_pcs(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u64 val;

	switch (np->flags & (NIU_FLAGS_10G |
			     NIU_FLAGS_FIBER |
			     NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		/* 1G fiber */
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		niu_pcs_mii_reset(np);
		break;

	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		/* 10G SERDES */
		if (!(np->flags & NIU_FLAGS_XMAC))
			return -EINVAL;

		/* 10G copper or fiber */
		val = nr64_mac(XMAC_CONFIG);
		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
		nw64_mac(XMAC_CONFIG, val);

		niu_xpcs_reset(np);

		val = nr64_xpcs(XPCS_CONTROL1);
		if (lp->loopback_mode == LOOPBACK_PHY)
			val |= XPCS_CONTROL1_LOOPBACK;
		else
			val &= ~XPCS_CONTROL1_LOOPBACK;
		nw64_xpcs(XPCS_CONTROL1, val);

		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
		break;

	case NIU_FLAGS_XCVR_SERDES:
		/* 1G SERDES */
		niu_pcs_mii_reset(np);
		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
		nw64_pcs(PCS_DPATH_MODE, 0);
		break;

	case 0:
		/* 1G copper */
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		/* 1G RGMII FIBER */
		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
		niu_pcs_mii_reset(np);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int niu_reset_tx_xmac(struct niu *np)
{
	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
					  (XTXMAC_SW_RST_REG_RS |
					   XTXMAC_SW_RST_SOFT_RST),
					  1000, 100, "XTXMAC_SW_RST");
}

/* Reset the TX BMAC, polling for the reset bit to self-clear. */
static int niu_reset_tx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_tx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_tx_xmac(np);
	else
		return niu_reset_tx_bmac(np);
}

/* Program XMAC TX parameters: min/max frame sizes, IPG, and clear the
 * TX statistics counters; TX stays disabled here.
 */
static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	val = nr64_mac(XMAC_MIN);
	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
		 XMAC_MIN_RX_MIN_PKT_SIZE);
	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
	nw64_mac(XMAC_MIN, val);

	nw64_mac(XMAC_MAX, max);

	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);

	val = nr64_mac(XMAC_IPG);
	if (np->flags & NIU_FLAGS_10G) {
		val &= ~XMAC_IPG_IPG_XGMII;
		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
	} else {
		val &= ~XMAC_IPG_IPG_MII_GMII;
		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
	}
	nw64_mac(XMAC_IPG, val);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
		 XMAC_CONFIG_STRETCH_MODE |
		 XMAC_CONFIG_VAR_MIN_IPG_EN |
		 XMAC_CONFIG_TX_ENABLE);
	nw64_mac(XMAC_CONFIG, val);

	nw64_mac(TXMAC_FRM_CNT, 0);
	nw64_mac(TXMAC_BYTE_CNT, 0);
}

/* Program BMAC TX parameters; TX stays disabled here. */
static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
{
	u64 val;

	nw64_mac(BMAC_MIN_FRAME, min);
	nw64_mac(BMAC_MAX_FRAME, max);

	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);

	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
	nw64_mac(BMAC_PREAMBLE_SIZE, 7);

	val = nr64_mac(BTXMAC_CONFIG);
	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
		 BTXMAC_CONFIG_ENABLE);
	nw64_mac(BTXMAC_CONFIG, val);
}

/* Choose min/max frame sizes from the MTU and initialize the TX MAC. */
static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}

/* Reset the RX XMAC, polling both reset bits until they self-clear. */
static int niu_reset_rx_xmac(struct niu *np)
{
	int limit;

	nw64_mac(XRXMAC_SW_RST,
		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
						 XRXMAC_SW_RST_SOFT_RST)))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

/* Reset the RX BMAC, polling for the reset bit to self-clear. */
static int niu_reset_rx_bmac(struct niu *np)
{
	int limit;

	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
	limit = 1000;
	while (--limit >= 0) {
		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
			break;
		udelay(100);
	}
	if (limit < 0) {
		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
			np->port,
			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
		return -ENODEV;
	}

	return 0;
}

static int niu_reset_rx_mac(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return niu_reset_rx_xmac(np);
	else
		return niu_reset_rx_bmac(np);
}

/* Initialize the RX XMAC: clear address filters and hash table, point
 * MAC addresses at the RDC table, set the config and zero counters.
 */
static void niu_init_rx_xmac(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
	int first_rdc_table = tp->first_table_num;
	unsigned long i;
	u64 val;

	nw64_mac(XMAC_ADD_FILT0, 0);
	nw64_mac(XMAC_ADD_FILT1, 0);
	nw64_mac(XMAC_ADD_FILT2, 0);
	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
	for (i = 0; i < MAC_NUM_HASH; i++)
		nw64_mac(XMAC_HASH_TBL(i), 0);
	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);

	val = nr64_mac(XMAC_CONFIG);
	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
		 XMAC_CONFIG_PROMISCUOUS |
		 XMAC_CONFIG_PROMISC_GROUP |
		 XMAC_CONFIG_ERR_CHK_DIS |
		 XMAC_CONFIG_RX_CRC_CHK_DIS |
		 XMAC_CONFIG_RESERVED_MULTICAST |
		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
		 XMAC_CONFIG_ADDR_FILTER_EN |
XMAC_CONFIG_RCV_PAUSE_ENABLE | XMAC_CONFIG_STRIP_CRC | XMAC_CONFIG_PASS_FLOW_CTRL | XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); val |= (XMAC_CONFIG_HASH_FILTER_EN); nw64_mac(XMAC_CONFIG, val); nw64_mac(RXMAC_BT_CNT, 0); nw64_mac(RXMAC_BC_FRM_CNT, 0); nw64_mac(RXMAC_MC_FRM_CNT, 0); nw64_mac(RXMAC_FRAG_CNT, 0); nw64_mac(RXMAC_HIST_CNT1, 0); nw64_mac(RXMAC_HIST_CNT2, 0); nw64_mac(RXMAC_HIST_CNT3, 0); nw64_mac(RXMAC_HIST_CNT4, 0); nw64_mac(RXMAC_HIST_CNT5, 0); nw64_mac(RXMAC_HIST_CNT6, 0); nw64_mac(RXMAC_HIST_CNT7, 0); nw64_mac(RXMAC_MPSZER_CNT, 0); nw64_mac(RXMAC_CRC_ER_CNT, 0); nw64_mac(RXMAC_CD_VIO_CNT, 0); nw64_mac(LINK_FAULT_CNT, 0); } static void niu_init_rx_bmac(struct niu *np) { struct niu_parent *parent = np->parent; struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; int first_rdc_table = tp->first_table_num; unsigned long i; u64 val; nw64_mac(BMAC_ADD_FILT0, 0); nw64_mac(BMAC_ADD_FILT1, 0); nw64_mac(BMAC_ADD_FILT2, 0); nw64_mac(BMAC_ADD_FILT12_MASK, 0); nw64_mac(BMAC_ADD_FILT00_MASK, 0); for (i = 0; i < MAC_NUM_HASH; i++) nw64_mac(BMAC_HASH_TBL(i), 0); niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); val = nr64_mac(BRXMAC_CONFIG); val &= ~(BRXMAC_CONFIG_ENABLE | BRXMAC_CONFIG_STRIP_PAD | BRXMAC_CONFIG_STRIP_FCS | BRXMAC_CONFIG_PROMISC | BRXMAC_CONFIG_PROMISC_GRP | BRXMAC_CONFIG_ADDR_FILT_EN | BRXMAC_CONFIG_DISCARD_DIS); val |= (BRXMAC_CONFIG_HASH_FILT_EN); nw64_mac(BRXMAC_CONFIG, val); val = nr64_mac(BMAC_ADDR_CMPEN); val |= BMAC_ADDR_CMPEN_EN0; nw64_mac(BMAC_ADDR_CMPEN, val); } static void niu_init_rx_mac(struct niu *np) { niu_set_primary_mac(np, np->dev->dev_addr); if (np->flags & NIU_FLAGS_XMAC) niu_init_rx_xmac(np); else niu_init_rx_bmac(np); } static void niu_enable_tx_xmac(struct niu *np, int on) { u64 val = nr64_mac(XMAC_CONFIG); if (on) val |= XMAC_CONFIG_TX_ENABLE; else val &= ~XMAC_CONFIG_TX_ENABLE; nw64_mac(XMAC_CONFIG, val); } static void 
niu_enable_tx_bmac(struct niu *np, int on) { u64 val = nr64_mac(BTXMAC_CONFIG); if (on) val |= BTXMAC_CONFIG_ENABLE; else val &= ~BTXMAC_CONFIG_ENABLE; nw64_mac(BTXMAC_CONFIG, val); } static void niu_enable_tx_mac(struct niu *np, int on) { if (np->flags & NIU_FLAGS_XMAC) niu_enable_tx_xmac(np, on); else niu_enable_tx_bmac(np, on); } static void niu_enable_rx_xmac(struct niu *np, int on) { u64 val = nr64_mac(XMAC_CONFIG); val &= ~(XMAC_CONFIG_HASH_FILTER_EN | XMAC_CONFIG_PROMISCUOUS); if (np->flags & NIU_FLAGS_MCAST) val |= XMAC_CONFIG_HASH_FILTER_EN; if (np->flags & NIU_FLAGS_PROMISC) val |= XMAC_CONFIG_PROMISCUOUS; if (on) val |= XMAC_CONFIG_RX_MAC_ENABLE; else val &= ~XMAC_CONFIG_RX_MAC_ENABLE; nw64_mac(XMAC_CONFIG, val); } static void niu_enable_rx_bmac(struct niu *np, int on) { u64 val = nr64_mac(BRXMAC_CONFIG); val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | BRXMAC_CONFIG_PROMISC); if (np->flags & NIU_FLAGS_MCAST) val |= BRXMAC_CONFIG_HASH_FILT_EN; if (np->flags & NIU_FLAGS_PROMISC) val |= BRXMAC_CONFIG_PROMISC; if (on) val |= BRXMAC_CONFIG_ENABLE; else val &= ~BRXMAC_CONFIG_ENABLE; nw64_mac(BRXMAC_CONFIG, val); } static void niu_enable_rx_mac(struct niu *np, int on) { if (np->flags & NIU_FLAGS_XMAC) niu_enable_rx_xmac(np, on); else niu_enable_rx_bmac(np, on); } static int niu_init_mac(struct niu *np) { int err; niu_init_xif(np); err = niu_init_pcs(np); if (err) return err; err = niu_reset_tx_mac(np); if (err) return err; niu_init_tx_mac(np); err = niu_reset_rx_mac(np); if (err) return err; niu_init_rx_mac(np); /* This looks hookey but the RX MAC reset we just did will * undo some of the state we setup in niu_init_tx_mac() so we * have to call it again. In particular, the RX MAC reset will * set the XMAC_MAX register back to it's default value. 
	 */
	niu_init_tx_mac(np);

	niu_enable_tx_mac(np, 1);

	niu_enable_rx_mac(np, 1);

	return 0;
}

/* Stop a single TX DMA channel; the status is intentionally ignored. */
static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_stop(np, rp->tx_channel);
}

/* Stop every TX DMA channel on this device. */
static void niu_stop_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_stop_one_tx_channel(np, rp);
	}
}

/* Reset a single TX DMA channel; the status is intentionally ignored. */
static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	(void) niu_tx_channel_reset(np, rp->tx_channel);
}

/* Reset every TX DMA channel on this device. */
static void niu_reset_tx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		niu_reset_one_tx_channel(np, rp);
	}
}

/* Stop (disable) a single RX DMA channel. */
static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
}

/* Stop every RX DMA channel on this device. */
static void niu_stop_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_stop_one_rx_channel(np, rp);
	}
}

/* Reset one RX channel: reset the DMA engine, mask its interrupts,
 * clear its status, and leave the channel disabled.
 */
static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
{
	int channel = rp->rx_channel;

	(void) niu_rx_channel_reset(np, channel);
	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
	nw64(RX_DMA_CTL_STAT(channel), 0);
	(void) niu_enable_rx_channel(np, channel, 0);
}

/* Reset every RX DMA channel on this device. */
static void niu_reset_rx_channels(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_reset_one_rx_channel(np, rp);
	}
}

/* Quiesce and disable the IPP block: poll (bounded to 100 loops) for
 * the DFIFO read/write pointers to converge, warn if they do not,
 * then clear the enable bits and reset the unit.
 */
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &=
~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}

/* Bring the whole device up: TXC, TX channels, RX channels,
 * classifier, ZCP, IPP, and finally the MAC.  On failure the stages
 * already initialized are torn down in reverse order via the goto
 * ladder.  Returns 0 or a negative errno.
 *
 * NOTE(review): a failure inside the TX-channel loop returns
 * directly and skips the out_uninit_tx_channels teardown — confirm
 * this is intended.
 */
static int niu_init_hw(struct niu *np)
{
	int i, err;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
	niu_disable_ipp(np);

out_uninit_rx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}

/* Quiesce the device: interrupts off, RX MAC off, IPP disabled, then
 * stop and reset all DMA channels.
 */
static void niu_stop_hw(struct niu *np)
{
	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
	niu_enable_interrupts(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
	niu_enable_rx_mac(np, 0);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
	niu_disable_ipp(np);

	netif_printk(np, ifdown,
KERN_DEBUG, np->dev, "Stop TX channels\n");
	niu_stop_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
	niu_stop_rx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
	niu_reset_tx_channels(np);

	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
	niu_reset_rx_channels(np);
}

/* Build the per-LDG IRQ names: slot 0 is the MAC; port 0 also gets
 * MIF and SYSERR slots; the remaining slots are rx-N / tx-N in ring
 * order.
 */
static void niu_set_irq_name(struct niu *np)
{
	int port = np->port;
	int i, j = 1;

	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
	if (port == 0) {
		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
		j = 3;
	}

	for (i = 0; i < np->num_ldg - j; i++) {
		if (i < np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-rx-%d",
				np->dev->name, i);
		else if (i < np->num_tx_rings + np->num_rx_rings)
			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
				i - np->num_rx_rings);
	}
}

/* Request one shared IRQ per logical device group; on failure free
 * the IRQs already requested.  Returns 0 or a negative errno.
 */
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	niu_set_irq_name(np);

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
				  np->irq_name[i], lp);
		if (err)
			goto out_free_irqs;

	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

/* Release every LDG IRQ. */
static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

/* Enable NAPI polling on all LDGs. */
static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

/* Disable NAPI polling on all LDGs. */
static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}

/* .ndo_open: allocate channel memory, mask interrupts, grab IRQs,
 * enable NAPI, then initialize the hardware under the device lock.
 */
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);
spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		/* Arm the periodic link/maintenance timer. */
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}

/* Stop everything in order: pending reset work, NAPI, TX queues,
 * the timer, then the hardware itself under the device lock.
 */
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

/* .ndo_stop: shut down, release IRQs and channel memory, LED off. */
static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}

/* Accumulate the XMAC hardware statistic counters into the software
 * copy in np->mac_stats.xmac.
 */
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

/* Accumulate the BMAC hardware statistic counters. */
static void
niu_sync_bmac_stats(struct niu *np) { struct niu_bmac_stats *mp = &np->mac_stats.bmac; mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); } static void niu_sync_mac_stats(struct niu *np) { if (np->flags & NIU_FLAGS_XMAC) niu_sync_xmac_stats(np); else niu_sync_bmac_stats(np); } static void niu_get_rx_stats(struct niu *np, struct rtnl_link_stats64 *stats) { u64 pkts, dropped, errors, bytes; struct rx_ring_info *rx_rings; int i; pkts = dropped = errors = bytes = 0; rx_rings = ACCESS_ONCE(np->rx_rings); if (!rx_rings) goto no_rings; for (i = 0; i < np->num_rx_rings; i++) { struct rx_ring_info *rp = &rx_rings[i]; niu_sync_rx_discard_stats(np, rp, 0); pkts += rp->rx_packets; bytes += rp->rx_bytes; dropped += rp->rx_dropped; errors += rp->rx_errors; } no_rings: stats->rx_packets = pkts; stats->rx_bytes = bytes; stats->rx_dropped = dropped; stats->rx_errors = errors; } static void niu_get_tx_stats(struct niu *np, struct rtnl_link_stats64 *stats) { u64 pkts, errors, bytes; struct tx_ring_info *tx_rings; int i; pkts = errors = bytes = 0; tx_rings = ACCESS_ONCE(np->tx_rings); if (!tx_rings) goto no_rings; for (i = 0; i < np->num_tx_rings; i++) { struct tx_ring_info *rp = &tx_rings[i]; pkts += rp->tx_packets; bytes += rp->tx_bytes; errors += rp->tx_errors; } no_rings: stats->tx_packets = pkts; stats->tx_bytes = bytes; stats->tx_errors = errors; } static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct niu *np = netdev_priv(dev); if (netif_running(dev)) { niu_get_rx_stats(np, stats); niu_get_tx_stats(np, stats); } return stats; } static void niu_load_hash_xmac(struct niu *np, u16 *hash) { int i; for (i = 0; i < 16; i++) nw64_mac(XMAC_HASH_TBL(i), hash[i]); } static void 
niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

/* Load the multicast hash for this port's MAC type. */
static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}

/* .ndo_set_rx_mode: rebuild promiscuous/multicast/unicast filtering
 * from dev->flags and the device address lists, with the RX MAC
 * disabled around the update.
 */
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		/* More unicast addresses than alternate-MAC slots;
		 * fall back to promiscuous mode.
		 */
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		/* No alternate addresses in use: disable all slots.
		 * BMAC alternate addressing starts at slot 1.
		 */
		int alt_start;
		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		/* Hash each multicast address into the 256-bit table
		 * using the top byte of the little-endian CRC.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}

/* .ndo_set_mac_address: validate and install a new station address. */
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

/* .ndo_do_ioctl: no private ioctls are supported. */
static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

/* Quiesce the netdev side (NAPI + TX queues) before a reset. */
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies; /* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

/* Restart the netdev side after a successful re-init. */
static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}

/* Re-arm all RX/TX rings after a reset: re-link still-mapped RX
 * pages back into the RBR, refill any remaining slots, and drop all
 * in-flight TX packets, then reset the ring indices.
 */
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					/* Pages are chained via page->mapping;
					 * page->index holds the DMA address.
					 */
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;
					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}

/* Workqueue handler: full stop / buffer reset / re-init cycle after
 * a fatal error or TX timeout.
 */
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
spin_unlock_irqrestore(&np->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}

/* .ndo_tx_timeout: defer the heavy reset work to process context. */
static void niu_tx_timeout(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	dev_err(np->device, "%s: Transmit timed out, resetting\n",
		dev->name);

	schedule_work(&np->reset_task);
}

/* Compose one little-endian 64-bit TX descriptor at @index. */
static void niu_set_txd(struct tx_ring_info *rp, int index,
			u64 mapping, u64 len, u64 mark,
			u64 n_frags)
{
	__le64 *desc = &rp->descr[index];

	*desc = cpu_to_le64(mark |
			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
			    (len << TX_DESC_TR_LEN_SHIFT) |
			    (mapping & TX_DESC_SAD));
}

/* Build the TX packet-header flags word: pad length, L3/L4 offsets,
 * checksum-request bits, and VLAN/LLC/IPv6 markers.  @pad_bytes is
 * the alignment padding in front of the packet header, @len the
 * payload length.
 */
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		/* VLAN tagged: classify on the encapsulated protocol. */
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);	/* fixed IPv6 header, in 32-bit words */
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
TXHDR_CSUM_UDP :
			      TXHDR_CSUM_SCTP));
		/* Checksum start/stuff offsets are relative to the end
		 * of the tx_pkt_hdr, expressed in 16-bit units.
		 */
		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}

/* .ndo_start_xmit: pad short frames, prepend the NIU tx_pkt_hdr
 * (reallocating headroom if needed), DMA-map the head and fragments,
 * write the descriptors, and kick the TX ring.
 */
static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;
	int prod, i, tlen;
	u64 mapping, mrk;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
		rp->tx_errors++;
		return NETDEV_TX_BUSY;
	}

	if (skb->len < ETH_ZLEN) {
		unsigned int pad_bytes = ETH_ZLEN - skb->len;

		if (skb_pad(skb, pad_bytes))
			goto out;
		skb_put(skb, pad_bytes);
	}

	/* Need room for the packet header plus up to 15 bytes of
	 * 16-byte alignment padding.
	 */
	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
		if (!skb_new) {
			rp->tx_errors++;
			goto out_drop;
		}
		kfree_skb(skb);
		skb = skb_new;
	} else
		skb_orphan(skb);

	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
	tp->resv = 0;

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	prod = rp->prod;

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	mrk = TX_DESC_SOP;
	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
		rp->mark_pending++;
	}

	/* Count descriptors: one per MAX_TX_DESC_LEN chunk of the
	 * head, plus one per fragment.
	 */
	tlen = len;
	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
		nfg++;
	}

	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
		mrk = nfg = 0;	/* SOP/mark/frag count only on first descr */

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
		len -= this_len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;
	rp->prod = prod;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

out:
	return NETDEV_TX_OK;

out_drop:
	rp->tx_errors++;
	kfree_skb(skb);
	goto out;
}

/* .ndo_change_mtu: only a jumbo <-> non-jumbo transition requires a
 * full channel reallocation and hardware re-init.
 */
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}

/* ethtool .get_drvinfo: driver/version/firmware/bus identification. */
static void niu_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct niu *np = netdev_priv(dev);
	struct niu_vpd *vpd = &np->vpd;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
		vpd->fcode_major, vpd->fcode_minor);
	if (np->parent->plat_type != PLAT_TYPE_NIU)
		strlcpy(info->bus_info, pci_name(np->pdev),
			sizeof(info->bus_info));
}

/* ethtool .get_settings: report link parameters from link_config. */
static int niu_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->active_advertising;
	cmd->autoneg = lp->active_autoneg;
	ethtool_cmd_speed_set(cmd, lp->active_speed);
	cmd->duplex =
lp->active_duplex;
	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	return 0;
}

/* ethtool .set_settings: record the request and re-init the link. */
static int niu_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = cmd->advertising;
	lp->speed = ethtool_cmd_speed(cmd);
	lp->duplex = cmd->duplex;
	lp->autoneg = cmd->autoneg;
	return niu_init_link(np);
}

/* ethtool .get_msglevel */
static u32 niu_get_msglevel(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->msg_enable;
}

/* ethtool .set_msglevel */
static void niu_set_msglevel(struct net_device *dev, u32 value)
{
	struct niu *np = netdev_priv(dev);

	np->msg_enable = value;
}

/* ethtool .nway_reset: restart autonegotiation if it is enabled. */
static int niu_nway_reset(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	if (np->link_config.autoneg)
		return niu_init_link(np);

	return 0;
}

/* ethtool .get_eeprom_len */
static int niu_get_eeprom_len(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	return np->eeprom_len;
}

/* ethtool .get_eeprom: copy out of the SPROM via 32-bit ESPC_NCR
 * register reads, handling an unaligned head, whole words, and a
 * short tail.
 */
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Reject wrap-around and out-of-range requests; clamp the
	 * length to the end of the EEPROM.
	 */
	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}

/* Map an ethtool flow type to its IP protocol number (0 if none). */
static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		*pid = IPPROTO_TCP;
		break;
	case
UDP_V4_FLOW:
	case UDP_V6_FLOW:
		*pid = IPPROTO_UDP;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		*pid = IPPROTO_SCTP;
		break;
	case AH_V4_FLOW:
	case AH_V6_FLOW:
		*pid = IPPROTO_AH;
		break;
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		*pid = IPPROTO_ESP;
		break;
	default:
		*pid = 0;
		break;
	}
}

/* Map a NIU class code to an ethtool flow type.  Returns 1 on
 * success, 0 for an unknown class.
 */
static int niu_class_to_ethflow(u64 class, int *flow_type)
{
	switch (class) {
	case CLASS_CODE_TCP_IPV4:
		*flow_type = TCP_V4_FLOW;
		break;
	case CLASS_CODE_UDP_IPV4:
		*flow_type = UDP_V4_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV4:
		*flow_type = AH_V4_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV4:
		*flow_type = SCTP_V4_FLOW;
		break;
	case CLASS_CODE_TCP_IPV6:
		*flow_type = TCP_V6_FLOW;
		break;
	case CLASS_CODE_UDP_IPV6:
		*flow_type = UDP_V6_FLOW;
		break;
	case CLASS_CODE_AH_ESP_IPV6:
		*flow_type = AH_V6_FLOW;
		break;
	case CLASS_CODE_SCTP_IPV6:
		*flow_type = SCTP_V6_FLOW;
		break;
	case CLASS_CODE_USER_PROG1:
	case CLASS_CODE_USER_PROG2:
	case CLASS_CODE_USER_PROG3:
	case CLASS_CODE_USER_PROG4:
		*flow_type = IP_USER_FLOW;
		break;
	default:
		return 0;
	}

	return 1;
}

/* Map an ethtool flow type to a NIU class code.  Returns 1 on
 * success, 0 for an unsupported flow type.
 */
static int niu_ethflow_to_class(int flow_type, u64 *class)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		*class = CLASS_CODE_TCP_IPV4;
		break;
	case UDP_V4_FLOW:
		*class = CLASS_CODE_UDP_IPV4;
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV4;
		break;
	case SCTP_V4_FLOW:
		*class = CLASS_CODE_SCTP_IPV4;
		break;
	case TCP_V6_FLOW:
		*class = CLASS_CODE_TCP_IPV6;
		break;
	case UDP_V6_FLOW:
		*class = CLASS_CODE_UDP_IPV6;
		break;
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		*class = CLASS_CODE_AH_ESP_IPV6;
		break;
	case SCTP_V6_FLOW:
		*class = CLASS_CODE_SCTP_IPV6;
		break;
	default:
		return 0;
	}

	return 1;
}

/* Translate a NIU flow key bitmask into ethtool RXH_* bits. */
static u64 niu_flowkey_to_ethflow(u64 flow_key)
{
	u64 ethflow = 0;

	if (flow_key & FLOW_KEY_L2DA)
		ethflow |= RXH_L2DA;
	if (flow_key & FLOW_KEY_VLAN)
		ethflow |= RXH_VLAN;
	if (flow_key & FLOW_KEY_IPSA)
		ethflow |= RXH_IP_SRC;
	if (flow_key & FLOW_KEY_IPDA)
		ethflow |= RXH_IP_DST;
	if (flow_key & FLOW_KEY_PROTO)
		ethflow |= RXH_L3_PROTO;
	if (flow_key
& (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
		ethflow |= RXH_L4_B_0_1;
	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
		ethflow |= RXH_L4_B_2_3;

	return ethflow;

}

/* Translate ethtool RXH_* bits into a NIU flow key.  Always
 * succeeds (returns 1).
 */
static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
{
	u64 key = 0;

	if (ethflow & RXH_L2DA)
		key |= FLOW_KEY_L2DA;
	if (ethflow & RXH_VLAN)
		key |= FLOW_KEY_VLAN;
	if (ethflow & RXH_IP_SRC)
		key |= FLOW_KEY_IPSA;
	if (ethflow & RXH_IP_DST)
		key |= FLOW_KEY_IPDA;
	if (ethflow & RXH_L3_PROTO)
		key |= FLOW_KEY_PROTO;
	if (ethflow & RXH_L4_B_0_1)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
	if (ethflow & RXH_L4_B_2_3)
		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);

	*flow_key = key;

	return 1;

}

/* ETHTOOL_GRXFH: report the RX hash fields (or RXH_DISCARD) for the
 * class corresponding to nfc->flow_type.
 */
static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;

	nfc->data = 0;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		nfc->data = RXH_DISCARD;
	else
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						      CLASS_CODE_USER_PROG1]);
	return 0;
}

/* Decode a TCAM IPv4 entry back into an ethtool rx_flow_spec
 * (addresses, TOS, and the flow-type specific port/SPI fields).
 */
static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
					struct ethtool_rx_flow_spec *fsp)
{
	u32 tmp;
	u16 prt;

	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;
	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* PORT_SPI packs source port in the high 16 bits and
		 * destination port in the low 16 bits.
		 */
		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
		break;
	case IP_USER_FLOW:
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		fsp->h_u.usr_ip4_spec.proto =
			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		fsp->m_u.usr_ip4_spec.proto =
			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;

		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		break;
	default:
		break;
	}
}

/* ETHTOOL_GRXCLSRULE: fetch one TCAM rule into nfc->fs. */
static int niu_get_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	u16 idx;
	u64 class;
	int ret = 0;

	idx = tcam_get_index(np, (u16)nfc->fs.location);

	tp = &parent->tcam[idx];
	if (!tp->valid) {
		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
			    parent->index, (u16)nfc->fs.location, idx);
		return -EINVAL;
	}

	/* fill the flow spec entry */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	ret = niu_class_to_ethflow(class, &fsp->flow_type);
	/* NOTE(review): niu_class_to_ethflow() returns 0 on failure,
	 * never a negative value, so this check can never fire —
	 * confirm whether the test should be "ret == 0".
	 */
	if (ret < 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
		ret = -EINVAL;
		goto out;
}

	/* The AH/ESP classes are shared; disambiguate by the stored
	 * protocol number.
	 */
	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		if (proto == IPPROTO_ESP) {
			if (fsp->flow_type == AH_V4_FLOW)
				fsp->flow_type = ESP_V4_FLOW;
			else
				fsp->flow_type = ESP_V6_FLOW;
		}
	}

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		ret = -EINVAL;
		break;
	case IP_USER_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret < 0)
		goto out;

	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
			TCAM_ASSOCDATA_OFFSET_SHIFT;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);
out:
	return ret;
}

/* ETHTOOL_GRXCLSRLALL: list the locations of all valid TCAM rules,
 * up to nfc->rule_cnt entries (-EMSGSIZE if there are more).
 */
static int niu_get_ethtool_tcam_all(struct niu *np,
				    struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	int i, idx, cnt;
	unsigned long flags;
	int ret = 0;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);

	niu_lock_parent(np, flags);
	for (cnt = 0, i = 0; i < nfc->data; i++) {
		idx = tcam_get_index(np, i);
		tp = &parent->tcam[idx];
		if (!tp->valid)
			continue;
		if (cnt == nfc->rule_cnt) {
			ret = -EMSGSIZE;
			break;
		}
		rule_locs[cnt] = i;
		cnt++;
	}
	niu_unlock_parent(np, flags);

	nfc->rule_cnt = cnt;

	return ret;
}

/* ethtool .get_rxnfc dispatcher. */
static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
		       u32 *rule_locs)
{
	struct niu *np = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = niu_get_hash_opts(np, cmd);
		break;
	case ETHTOOL_GRXRINGS:
		cmd->data = np->num_rx_rings;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = niu_get_ethtool_tcam_entry(np, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret =
niu_get_ethtool_tcam_all(np, cmd, rule_locs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ETHTOOL_SRXFH: program the RX hash fields (or the discard bit)
 * for one flow class, mirroring the value into the parent's cached
 * tcam_key/flow_key tables.
 */
static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;
	u64 flow_key = 0;
	unsigned long flags;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	if (class < CLASS_CODE_USER_PROG1 ||
	    class > CLASS_CODE_SCTP_IPV6)
		return -EINVAL;

	if (nfc->data & RXH_DISCARD) {
		niu_lock_parent(np, flags);
		flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
		flow_key |= TCAM_KEY_DISC;
		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
		niu_unlock_parent(np, flags);
		return 0;
	} else {
		/* Discard was set before, but is not set now */
		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
		    TCAM_KEY_DISC) {
			niu_lock_parent(np, flags);
			flow_key = np->parent->tcam_key[class -
					       CLASS_CODE_USER_PROG1];
			flow_key &= ~TCAM_KEY_DISC;
			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
			     flow_key);
			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
				flow_key;
			niu_unlock_parent(np, flags);
		}
	}

	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
		return -EINVAL;

	niu_lock_parent(np, flags);
	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
	niu_unlock_parent(np, flags);

	return 0;
}

/* Encode an ethtool IPv4 flow spec into TCAM key/mask words. */
static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
				       struct niu_tcam_entry *tp,
				       int l2_rdc_tab, u64 class)
{
	u8 pid = 0;
	u32 sip, dip, sipm, dipm, spi, spim;
	u16 sport, dport, spm, dpm;

	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);

	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;

	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
tp->key[3] |= dip; tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; tp->key_mask[3] |= dipm; tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << TCAM_V4KEY2_TOS_SHIFT); tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << TCAM_V4KEY2_TOS_SHIFT); switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); tp->key[2] |= (((u64)sport << 16) | dport); tp->key_mask[2] |= (((u64)spm << 16) | dpm); niu_ethflow_to_l3proto(fsp->flow_type, &pid); break; case AH_V4_FLOW: case ESP_V4_FLOW: spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); tp->key[2] |= spi; tp->key_mask[2] |= spim; niu_ethflow_to_l3proto(fsp->flow_type, &pid); break; case IP_USER_FLOW: spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); tp->key[2] |= spi; tp->key_mask[2] |= spim; pid = fsp->h_u.usr_ip4_spec.proto; break; default: break; } tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); if (pid) { tp->key_mask[2] |= TCAM_V4KEY2_PROTO; } } static int niu_add_ethtool_tcam_entry(struct niu *np, struct ethtool_rxnfc *nfc) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; int l2_rdc_table = rdc_table->first_table_num; u16 idx; u64 class; unsigned long flags; int err, ret; ret = 0; idx = nfc->fs.location; if (idx >= tcam_get_size(np)) return -EINVAL; if (fsp->flow_type == IP_USER_FLOW) { int i; int add_usr_cls = 0; struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; if (uspec->ip_ver != ETH_RX_NFC_IP4) return -EINVAL; niu_lock_parent(np, flags); for (i = 0; i < NIU_L3_PROG_CLS; i++) { if (parent->l3_cls[i]) 
{ if (uspec->proto == parent->l3_cls_pid[i]) { class = parent->l3_cls[i]; parent->l3_cls_refcnt[i]++; add_usr_cls = 1; break; } } else { /* Program new user IP class */ switch (i) { case 0: class = CLASS_CODE_USER_PROG1; break; case 1: class = CLASS_CODE_USER_PROG2; break; case 2: class = CLASS_CODE_USER_PROG3; break; case 3: class = CLASS_CODE_USER_PROG4; break; default: break; } ret = tcam_user_ip_class_set(np, class, 0, uspec->proto, uspec->tos, umask->tos); if (ret) goto out; ret = tcam_user_ip_class_enable(np, class, 1); if (ret) goto out; parent->l3_cls[i] = class; parent->l3_cls_pid[i] = uspec->proto; parent->l3_cls_refcnt[i]++; add_usr_cls = 1; break; } } if (!add_usr_cls) { netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", parent->index, __func__, uspec->proto); ret = -EINVAL; goto out; } niu_unlock_parent(np, flags); } else { if (!niu_ethflow_to_class(fsp->flow_type, &class)) { return -EINVAL; } } niu_lock_parent(np, flags); idx = tcam_get_index(np, idx); tp = &parent->tcam[idx]; memset(tp, 0, sizeof(*tp)); /* fill in the tcam key and mask */ switch (fsp->flow_type) { case TCP_V4_FLOW: case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); break; case TCP_V6_FLOW: case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: /* Not yet implemented */ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", parent->index, __func__, fsp->flow_type); ret = -EINVAL; goto out; case IP_USER_FLOW: niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class); break; default: netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", parent->index, __func__, fsp->flow_type); ret = -EINVAL; goto out; } /* fill in the assoc data */ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { tp->assoc_data = TCAM_ASSOCDATA_DISC; } else { if (fsp->ring_cookie >= np->num_rx_rings) { netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", 
parent->index, __func__, (long long)fsp->ring_cookie); ret = -EINVAL; goto out; } tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | (fsp->ring_cookie << TCAM_ASSOCDATA_OFFSET_SHIFT)); } err = tcam_write(np, idx, tp->key, tp->key_mask); if (err) { ret = -EINVAL; goto out; } err = tcam_assoc_write(np, idx, tp->assoc_data); if (err) { ret = -EINVAL; goto out; } /* validate the entry */ tp->valid = 1; np->clas.tcam_valid_entries++; out: niu_unlock_parent(np, flags); return ret; } static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc) { struct niu_parent *parent = np->parent; struct niu_tcam_entry *tp; u16 idx; unsigned long flags; u64 class; int ret = 0; if (loc >= tcam_get_size(np)) return -EINVAL; niu_lock_parent(np, flags); idx = tcam_get_index(np, loc); tp = &parent->tcam[idx]; /* if the entry is of a user defined class, then update*/ class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) { int i; for (i = 0; i < NIU_L3_PROG_CLS; i++) { if (parent->l3_cls[i] == class) { parent->l3_cls_refcnt[i]--; if (!parent->l3_cls_refcnt[i]) { /* disable class */ ret = tcam_user_ip_class_enable(np, class, 0); if (ret) goto out; parent->l3_cls[i] = 0; parent->l3_cls_pid[i] = 0; } break; } } if (i == NIU_L3_PROG_CLS) { netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", parent->index, __func__, (unsigned long long)class); ret = -EINVAL; goto out; } } ret = tcam_flush(np, idx); if (ret) goto out; /* invalidate the entry */ tp->valid = 0; np->clas.tcam_valid_entries--; out: niu_unlock_parent(np, flags); return ret; } static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct niu *np = netdev_priv(dev); int ret = 0; switch (cmd->cmd) { case ETHTOOL_SRXFH: ret = niu_set_hash_opts(np, cmd); break; case ETHTOOL_SRXCLSRLINS: ret = niu_add_ethtool_tcam_entry(np, cmd); break; case ETHTOOL_SRXCLSRLDEL: ret = niu_del_ethtool_tcam_entry(np, 
cmd->fs.location);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* Statistic name tables for ethtool -S.  The order of each table must match
 * the order in which niu_get_ethtool_stats() emits values below.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
	{ "tx_frames" },
	{ "tx_bytes" },
	{ "tx_fifo_errors" },
	{ "tx_overflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_underflow_errors" },
	{ "rx_local_faults" },
	{ "rx_remote_faults" },
	{ "rx_link_faults" },
	{ "rx_align_errors" },
	{ "rx_frags" },
	{ "rx_mcasts" },
	{ "rx_bcasts" },
	{ "rx_hist_cnt1" },
	{ "rx_hist_cnt2" },
	{ "rx_hist_cnt3" },
	{ "rx_hist_cnt4" },
	{ "rx_hist_cnt5" },
	{ "rx_hist_cnt6" },
	{ "rx_hist_cnt7" },
	{ "rx_octets" },
	{ "rx_code_violations" },
	{ "rx_len_errors" },
	{ "rx_crc_errors" },
	{ "rx_underflows" },
	{ "rx_overflows" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_bmac_stat_keys[] = {
	{ "tx_underflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_bytes" },
	{ "tx_frames" },
	{ "rx_overflows" },
	{ "rx_frames" },
	{ "rx_align_errors" },
	{ "rx_crc_errors" },
	{ "rx_len_errors" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_rxchan_stat_keys[] = {
	{ "rx_channel" },
	{ "rx_packets" },
	{ "rx_bytes" },
	{ "rx_dropped" },
	{ "rx_errors" },
};

#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)

static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_txchan_stat_keys[] = {
	{ "tx_channel" },
	{ "tx_packets" },
	{ "tx_bytes" },
	{ "tx_errors" },
};

#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)

/* ethtool_ops.get_strings: copy out the MAC-type-specific key table
 * (XMAC for ports 0/1, BMAC otherwise) followed by one per-channel table
 * for every RX and TX ring.
 */
static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, niu_xmac_stat_keys,
		       sizeof(niu_xmac_stat_keys));
		data += sizeof(niu_xmac_stat_keys);
	} else {
		memcpy(data, niu_bmac_stat_keys,
		       sizeof(niu_bmac_stat_keys));
		data += sizeof(niu_bmac_stat_keys);
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		memcpy(data, niu_rxchan_stat_keys,
		       sizeof(niu_rxchan_stat_keys));
		data += sizeof(niu_rxchan_stat_keys);
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		memcpy(data, niu_txchan_stat_keys,
		       sizeof(niu_txchan_stat_keys));
		data += sizeof(niu_txchan_stat_keys);
	}
}

/* ethtool_ops.get_sset_count: total number of u64 stats emitted, which
 * must agree with both niu_get_strings() and niu_get_ethtool_stats().
 */
static int niu_get_sset_count(struct net_device *dev, int stringset)
{
	struct niu *np = netdev_priv(dev);

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return (np->flags & NIU_FLAGS_XMAC ?
		 NUM_XMAC_STAT_KEYS :
		 NUM_BMAC_STAT_KEYS) +
		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
}

/* ethtool_ops.get_ethtool_stats: refresh the MAC counters, then copy the
 * raw mac_stats struct (it is laid out as consecutive u64s matching the
 * key tables) followed by the per-ring counters.
 */
static void niu_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct niu *np = netdev_priv(dev);
	int i;

	niu_sync_mac_stats(np);
	if (np->flags & NIU_FLAGS_XMAC) {
		memcpy(data, &np->mac_stats.xmac,
		       sizeof(struct niu_xmac_stats));
		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
	} else {
		memcpy(data, &np->mac_stats.bmac,
		       sizeof(struct niu_bmac_stats));
		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
	}
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		niu_sync_rx_discard_stats(np, rp, 0);

		data[0] = rp->rx_channel;
		data[1] = rp->rx_packets;
		data[2] = rp->rx_bytes;
		data[3] = rp->rx_dropped;
		data[4] = rp->rx_errors;
		data += 5;
	}
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		data[0] = rp->tx_channel;
		data[1] = rp->tx_packets;
		data[2] = rp->tx_bytes;
		data[3] = rp->tx_errors;
		data += 4;
	}
}

/* Save the MAC config register that holds the LED-control bit, so the LED
 * identify cycle can be undone by niu_led_state_restore().
 */
static u64 niu_led_state_save(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return nr64_mac(XMAC_CONFIG);
	else
		return nr64_mac(BMAC_XIF_CONFIG);
}

static void niu_led_state_restore(struct niu *np, u64 val)
{
	if (np->flags & NIU_FLAGS_XMAC)
		nw64_mac(XMAC_CONFIG, val);
	else
		nw64_mac(BMAC_XIF_CONFIG, val);
}

/* Force the port LED on or off via the MAC-type-specific config bit */
static void niu_force_led(struct niu *np, int on)
{
	u64 val, reg, bit;

	if (np->flags & NIU_FLAGS_XMAC) {
		reg = XMAC_CONFIG;
		bit = XMAC_CONFIG_FORCE_LED_ON;
	} else {
		reg = BMAC_XIF_CONFIG;
		bit = BMAC_XIF_CONFIG_LINK_LED;
	}

	val = nr64_mac(reg);
	if (on)
		val |= bit;
	else
		val &= ~bit;
	nw64_mac(reg, val);
}

/* ethtool_ops.set_phys_id: LED identify.  Returning 1 from ID_ACTIVE asks
 * the ethtool core to call back with ID_ON/ID_OFF once per second.
 */
static int niu_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct niu *np = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		np->orig_led_state = niu_led_state_save(np);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		niu_force_led(np, 1);
		break;

	case ETHTOOL_ID_OFF:
		niu_force_led(np, 0);
		break;

	case ETHTOOL_ID_INACTIVE:
		niu_led_state_restore(np, np->orig_led_state);
	}

	return 0;
}

static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.nway_reset		= niu_nway_reset,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_settings		= niu_get_settings,
	.set_settings		= niu_set_settings,
	.get_strings		= niu_get_strings,
	.get_sset_count		= niu_get_sset_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.set_phys_id		= niu_set_phys_id,
	.get_rxnfc		= niu_get_nfc,
	.set_rxnfc		= niu_set_nfc,
};

/* Record (and on most platforms program) the logical-device-number to
 * logical-device-group mapping used for interrupt routing.
 */
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
			      int ldg, int ldn)
{
	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
		return -EINVAL;
	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	parent->ldg_map[ldn] = ldg;

	if (np->parent->plat_type == PLAT_TYPE_NIU) {
		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
		 * the firmware, and we're not supposed to change them.
		 * Validate the mapping, because if it's wrong we probably
		 * won't get any interrupts and that's painful to debug.
		 */
		if (nr64(LDG_NUM(ldn)) != ldg) {
			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
				np->port, ldn, ldg,
				(unsigned long long) nr64(LDG_NUM(ldn)));
			return -EINVAL;
		}
	} else
		nw64(LDG_NUM(ldn), ldg);

	return 0;
}

/* Program the LDG interrupt timer resolution register */
static int niu_set_ldg_timer_res(struct niu *np, int res)
{
	if (res < 0 || res > LDG_TIMER_RES_VAL)
		return -EINVAL;
	nw64(LDG_TIMER_RES, res);

	return 0;
}

/* Program the system interrupt data (function/vector) for one LDG */
static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
{
	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
	    (func < 0 || func > 3) ||
	    (vector < 0 || vector > 0x1f))
		return -EINVAL;
	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);

	return 0;
}

/* Read one byte from the PCI expansion SPROM via the ESPC PIO interface.
 * Returns the byte value (>= 0), or a negative errno on bad address or
 * timeout.  NOTE(review): the start/poll sequence is issued twice before
 * the data is sampled — presumably a hardware requirement of the ESPC
 * engine; confirm against the Neptune programming manual before changing.
 */
static int niu_pci_eeprom_read(struct niu *np, u32 addr)
{
	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
	int limit;

	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
		return -EINVAL;

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = frame_base;
	nw64(ESPC_PIO_STAT, frame);
	limit = 64;
	do {
		udelay(5);
		frame = nr64(ESPC_PIO_STAT);
		if (frame & ESPC_PIO_STAT_READ_END)
			break;
	} while (limit--);
	if (!(frame & ESPC_PIO_STAT_READ_END)) {
		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
			(unsigned long long) frame);
		return -ENODEV;
	}

	frame = nr64(ESPC_PIO_STAT);
	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
}

/* Read a big-endian 16-bit value (byte at @off is the high byte) */
static int niu_pci_eeprom_read16(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;
	val = (err << 8);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;
	val |= (err & 0xff);

	return val;
}

/* Read a little-endian ("swapped") 16-bit value: byte at @off is low */
static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
{
	int err = niu_pci_eeprom_read(np, off);
	u16 val;

	if (err < 0)
		return err;
	val = (err & 0xff);
	err = niu_pci_eeprom_read(np, off + 1);
	if (err < 0)
		return err;
	val |= (err & 0xff) << 8;

	return val;
}

/* Copy a NUL-terminated VPD property name from EEPROM into @namebuf.
 * Returns the number of bytes consumed (including the NUL), a negative
 * errno on read failure, or -EINVAL if no NUL fit within @namebuf_len.
 */
static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
				    int namebuf_len)
{
	int i;

	for (i = 0; i < namebuf_len; i++) {
		int err = niu_pci_eeprom_read(np, off + i);
		if (err < 0)
			return err;
		*namebuf++ = err;
		if (!err)
			break;
	}
	if (i >= namebuf_len)
		return -EINVAL;

	return i + 1;
}

/* Parse "FCode x.y" out of the VPD version string; if the version meets
 * the driver minimum, mark the VPD contents as usable (NIU_FLAGS_VPD_VALID).
 */
static void niu_vpd_parse_version(struct niu *np)
{
	struct niu_vpd *vpd = &np->vpd;
	int len = strlen(vpd->version) + 1;
	const char *s = vpd->version;
	int i;

	for (i = 0; i < len - 5; i++) {
		if (!strncmp(s + i, "FCode ", 6))
			break;
	}
	if (i >= len - 5)
		return;

	s += i + 5;
	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
		     vpd->fcode_major, vpd->fcode_minor);
	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
		np->flags |= NIU_FLAGS_VPD_VALID;
}

/* ESPC_PIO_EN_ENABLE must be set */
/* Walk the VPD property list in [start, end), copying the recognized
 * properties (model, board-model, version, local-mac-address,
 * num-mac-addresses, phy-type) into np->vpd.  Returns 1 once all six have
 * been found (after parsing the version), 0 if the region is exhausted
 * first, or a negative errno on read failure.
 */
static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
{
	unsigned int found_mask = 0;
#define FOUND_MASK_MODEL	0x00000001
#define FOUND_MASK_BMODEL	0x00000002
#define FOUND_MASK_VERS		0x00000004
#define FOUND_MASK_MAC		0x00000008
#define FOUND_MASK_NMAC		0x00000010
#define FOUND_MASK_PHY		0x00000020
#define FOUND_MASK_ALL		0x0000003f

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
	while (start < end) {
		int len, err, prop_len;
		char namebuf[64];
		u8 *prop_buf;
		int max_len;

		if (found_mask == FOUND_MASK_ALL) {
			niu_vpd_parse_version(np);
			return 1;
		}

		err = niu_pci_eeprom_read(np, start + 2);
		if (err < 0)
			return err;
		len = err;
		start += 3;

		prop_len = niu_pci_eeprom_read(np, start + 4);
		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
		if (err < 0)
			return err;

		prop_buf = NULL;
		max_len = 0;
		if (!strcmp(namebuf, "model")) {
			prop_buf = np->vpd.model;
			max_len = NIU_VPD_MODEL_MAX;
			found_mask |= FOUND_MASK_MODEL;
		} else if (!strcmp(namebuf, "board-model")) {
			prop_buf = np->vpd.board_model;
			max_len = NIU_VPD_BD_MODEL_MAX;
			found_mask |= FOUND_MASK_BMODEL;
		} else if (!strcmp(namebuf, "version")) {
			prop_buf = np->vpd.version;
			max_len = NIU_VPD_VERSION_MAX;
			found_mask |= FOUND_MASK_VERS;
		} else if (!strcmp(namebuf, "local-mac-address")) {
			prop_buf = np->vpd.local_mac;
			max_len = ETH_ALEN;
			found_mask |= FOUND_MASK_MAC;
		} else if (!strcmp(namebuf, "num-mac-addresses")) {
			prop_buf = &np->vpd.mac_num;
			max_len = 1;
			found_mask |= FOUND_MASK_NMAC;
		} else if (!strcmp(namebuf, "phy-type")) {
			prop_buf = np->vpd.phy_type;
			max_len = NIU_VPD_PHY_TYPE_MAX;
			found_mask |= FOUND_MASK_PHY;
		}

		if (max_len && prop_len > max_len) {
			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
			return -EINVAL;
		}

		if (prop_buf) {
			u32 off = start + 5 + err;
			int i;

			netif_printk(np, probe, KERN_DEBUG, np->dev,
				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
				     namebuf, prop_len);
			for (i = 0; i < prop_len; i++)
				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
		}

		start += len;
	}

	return 0;
}

/* ESPC_PIO_EN_ENABLE must be set */
/* Walk the chain of 0x90-tagged VPD blocks starting at @start and scan
 * each one for properties until scan_props reports done (1) or an error.
 */
static void niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		/* each VPD block must carry the 0x90 tag */
		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;
		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}

/* ESPC_PIO_EN_ENABLE must be set */
/* Locate the VPD data within the expansion ROM: walk the ROM image chain
 * (0x55aa header, "PCIR" data structure) until the OBP image (type 0x01)
 * is found, then follow its pointer to the 0x82-tagged VPD area.
 * Returns the VPD offset, or 0 if no valid VPD was found.
 */
static u32 niu_pci_vpd_offset(struct niu *np)
{
	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
	int err;

	while (start < end) {
		ret = start;

		/* ROM header signature?  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x55aa)
			return 0;

		/* Apply offset to PCI data structure.  */
		err = niu_pci_eeprom_read16(np, start + 23);
		if (err < 0)
			return 0;
		start += err;

		/* Check for "PCIR" signature.  */
		err = niu_pci_eeprom_read16(np, start +  0);
		if (err != 0x5043)
			return 0;
		err = niu_pci_eeprom_read16(np, start +  2);
		if (err != 0x4952)
			return 0;

		/* Check for OBP image type.  */
		err = niu_pci_eeprom_read(np, start + 20);
		if (err < 0)
			return 0;
		if (err != 0x01) {
			/* Not OBP: skip to the next image in the chain
			 * (image length is in 512-byte units at ret + 2).
			 */
			err = niu_pci_eeprom_read(np, ret + 2);
			if (err < 0)
				return 0;

			start = ret + (err * 512);
			continue;
		}

		err = niu_pci_eeprom_read16_swp(np, start + 8);
		if (err < 0)
			return err;
		ret += err;

		/* VPD area must begin with the 0x82 tag */
		err = niu_pci_eeprom_read(np, ret + 0);
		if (err != 0x82)
			return 0;

		return ret;
	}

	return 0;
}

/* Decode the VPD "phy-type" string into NIU_FLAGS_{10G,FIBER,XCVR_SERDES}
 * and np->mac_xcvr.  Returns -EINVAL for an unrecognized string.
 */
static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
{
	if (!strcmp(phy_prop, "mif")) {
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER | NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
	} else if (!strcmp(phy_prop, "xgf")) {
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "pcs")) {
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
	} else if (!strcmp(phy_prop, "xgc")) {
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
		/* 10G Serdes or 1G Serdes, default to 10G */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_XPCS;
	} else {
		return -EINVAL;
	}
	return 0;
}

/* Infer the number of ports from the VPD model string; 0 if unknown */
static int niu_pci_vpd_get_nports(struct niu *np)
{
	int ports = 0;

	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
	    (!strcmp(np->vpd.model,
NIU_ALONSO_MDL_STR))) {
		ports = 4;
	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
		ports = 2;
	}

	return ports;
}

/* Validate the fetched VPD: check the MAC address, apply model-specific
 * flag overrides (Alonso/Kimi/Foxxy boards) or decode the generic
 * phy-type string, then derive this port's MAC address by adding the port
 * number to the base VPD MAC (with carry into byte 4).  On failure,
 * NIU_FLAGS_VPD_VALID is cleared so the caller falls back to SPROM.
 */
static void niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			/* ports 2/3 on these boards are 1G fiber */
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);

	/* per-port MAC = base MAC + port number, with carry into byte 4 */
	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;
}

/* SPROM fallback probe: verify the image checksum (all bytes must sum to
 * 0xab mod 256), decode the per-port PHY type into flags/mac_xcvr, read
 * the base MAC address and derive this port's MAC, then pull the model
 * and board-model strings and the MAC count out of the NCR words.
 */
static int niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[0] = (val >>  0) & 0xff;
	dev->dev_addr[1] = (val >>  8) & 0xff;
	dev->dev_addr[2] = (val >> 16) & 0xff;
	dev->dev_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->dev_addr[4] = (val >>  0) & 0xff;
	dev->dev_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->dev_addr);
		return -EINVAL;
	}

	/* per-port MAC = base MAC + port number, with carry into byte 4 */
	val8 = dev->dev_addr[5];
	dev->dev_addr[5] += np->port;
	if (dev->dev_addr[5] < val8)
		dev->dev_addr[4]++;

	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 8 * 4)
		return -EINVAL;

	/* model string is stored big-endian, four bytes per NCR word */
	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}

/* Determine parent->num_ports (NIU platform: fixed 2; else VPD model,
 * then SPROM, then a final default of 4) and reject ports beyond it.
 * Ports 0/1 always use the XMAC.
 */
static int niu_get_and_validate_port(struct niu *np)
{
	struct niu_parent *parent = np->parent;

	if (np->port <= 1)
		np->flags |= NIU_FLAGS_XMAC;

	if (!parent->num_ports) {
		if (parent->plat_type == PLAT_TYPE_NIU) {
			parent->num_ports = 2;
		} else {
			parent->num_ports = niu_pci_vpd_get_nports(np);
			if (!parent->num_ports) {
				/* Fall back to SPROM as last resort.
				 * This will fail on most cards.
				 */
				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
					ESPC_NUM_PORTS_MACS_VAL;

				/* All of the current probing methods fail on
				 * Maramba on-board parts.
				 */
				if (!parent->num_ports)
					parent->num_ports = 4;
			}
		}
	}

	if (np->port >= parent->num_ports)
		return -ENODEV;

	return 0;
}

/* Record one discovered PHY in the probe table if its ID matches a PHY
 * this driver supports (BCM8704/8706 or MRVL88X2011 for PMA_PMD/PCS,
 * BCM5464R for MII).  Negative dev IDs (failed MDIO reads) are ignored.
 */
static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
{
	u32 id = (dev_id_1 << 16) | dev_id_2;
	u8 idx;

	if (dev_id_1 < 0 || dev_id_2 < 0)
		return 0;
	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
		 * test covers the 8706 as well.
		 */
		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
			return 0;
	} else {
		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
			return 0;
	}

	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
		parent->index, id,
		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
		type == PHY_TYPE_PCS ? "PCS" : "MII", phy_port);

	if (p->cur[type] >= NIU_MAX_PORTS) {
		pr_err("Too many PHY ports\n");
		return -EINVAL;
	}
	idx = p->cur[type];
	p->phy_id[type][idx] = id;
	p->phy_port[type][idx] = phy_port;
	p->cur[type] = idx + 1;

	return 0;
}

/* True if a 10G-capable (PMA/PMD or PCS) PHY was recorded at @port */
static int port_has_10g(struct phy_probe_info *p, int port)
{
	int i;

	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
			return 1;
	}
	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
		if (p->phy_port[PHY_TYPE_PCS][i] == port)
			return 1;
	}

	return 0;
}

/* Count 10G PHYs on ports 8..31; *lowest gets the first such port (32 if
 * none).
 */
static int count_10g_ports(struct phy_probe_info *p, int *lowest)
{
	int port, cnt;

	cnt = 0;
	*lowest = 32;
	for (port = 8; port < 32; port++) {
		if (port_has_10g(p, port)) {
			if (!cnt)
				*lowest = port;
			cnt++;
		}
	}

	return cnt;
}

/* Count 1G (MII) PHYs; *lowest gets the first recorded MII port (32 if
 * none).
 */
static int count_1g_ports(struct phy_probe_info *p, int *lowest)
{
	*lowest = 32;
	if (p->cur[PHY_TYPE_MII])
		*lowest = p->phy_port[PHY_TYPE_MII][0];

	return p->cur[PHY_TYPE_MII];
}

/* N2 NIU platform: split the 16 RX and 16 TX channels evenly per port */
static void niu_n2_divide_channels(struct niu_parent *parent)
{
	int num_ports = parent->num_ports;
	int i;

	for (i = 0; i < num_ports; i++) {
		parent->rxchan_per_port[i] = (16 / num_ports);
		parent->txchan_per_port[i] = (16 / num_ports);

		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
	}
}

/* Distribute RX/TX DMA channels across ports.  With a mix of 10G and 1G
 * ports, each 1G port gets a fixed small share and the 10G ports divide
 * the remainder; with a uniform configuration channels are split evenly.
 * Over-allocation is clamped to one channel per port.
 */
static void niu_divide_channels(struct niu_parent *parent,
				int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			   parent->index, tot_rx, tot_tx);
	}
}

/* Assign each port its share of RDC tables, filling every table slot by
 * cycling through the port's RX channels.
 */
static void niu_divide_rdc_groups(struct niu_parent *parent,
				  int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group
= 0;
    rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
    rdc_channel_base = 0;
    for (i = 0; i < num_ports; i++) {
        struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
        int grp, num_channels = parent->rxchan_per_port[i];
        int this_channel_offset;

        tp->first_table_num = rdc_group;
        tp->num_tables = rdc_groups_per_port;
        this_channel_offset = 0;
        for (grp = 0; grp < tp->num_tables; grp++) {
            struct rdc_table *rt = &tp->tables[grp];
            int slot;

            pr_info("niu%d: Port %d RDC tbl(%d) [ ",
                parent->index, i, tp->first_table_num + grp);
            for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
                rt->rxdma_channel[slot] =
                    rdc_channel_base + this_channel_offset;
                pr_cont("%d ", rt->rxdma_channel[slot]);
                /* Wrap so every table slot maps to one of this
                 * port's channels even when slots > channels.
                 */
                if (++this_channel_offset == num_channels)
                    this_channel_offset = 0;
            }
            pr_cont("]\n");
        }
        parent->rdc_default[i] = rdc_channel_base;

        rdc_channel_base += num_channels;
        rdc_group += rdc_groups_per_port;
    }
}

/* Scan MDIO addresses 8-31 for PMA/PMD, PCS and MII PHY IDs, recording
 * each hit into @info via phy_record().  Returns 0 or the first error
 * reported by phy_record().
 */
static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
                   struct phy_probe_info *info)
{
    unsigned long flags;
    int port, err;

    memset(info, 0, sizeof(*info));

    /* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
    niu_lock_parent(np, flags);
    err = 0;
    for (port = 8; port < 32; port++) {
        int dev_id_1, dev_id_2;

        dev_id_1 = mdio_read(np, port,
                     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
        dev_id_2 = mdio_read(np, port,
                     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
        err = phy_record(parent, info, dev_id_1, dev_id_2, port,
                 PHY_TYPE_PMA_PMD);
        if (err)
            break;
        dev_id_1 = mdio_read(np, port,
                     NIU_PCS_DEV_ADDR, MII_PHYSID1);
        dev_id_2 = mdio_read(np, port,
                     NIU_PCS_DEV_ADDR, MII_PHYSID2);
        err = phy_record(parent, info, dev_id_1, dev_id_2, port,
                 PHY_TYPE_PCS);
        if (err)
            break;
        dev_id_1 = mii_read(np, port, MII_PHYSID1);
        dev_id_2 = mii_read(np, port, MII_PHYSID2);
        err = phy_record(parent, info, dev_id_1, dev_id_2, port,
                 PHY_TYPE_MII);
        if (err)
            break;
    }
    niu_unlock_parent(np, flags);

    return err;
}

/* Determine the per-port PHY layout (10G vs 1G) for the whole chip,
 * either from known board model strings or by probing PHYs, then
 * divide the RX/TX channels and RDC groups among the ports.
 */
static int walk_phys(struct niu *np, struct niu_parent *parent)
{
    struct phy_probe_info *info = &parent->phy_probe_info;
    int lowest_10g, lowest_1g;
    int num_10g, num_1g;
    u32 val;
    int err;

    num_10g = num_1g = 0;

    if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
        !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
        num_10g = 0;
        num_1g = 2;
        parent->plat_type = PLAT_TYPE_ATCA_CP3220;
        parent->num_ports = 4;
        val = (phy_encode(PORT_TYPE_1G, 0) |
               phy_encode(PORT_TYPE_1G, 1) |
               phy_encode(PORT_TYPE_1G, 2) |
               phy_encode(PORT_TYPE_1G, 3));
    } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
        num_10g = 2;
        num_1g = 0;
        parent->num_ports = 2;
        val = (phy_encode(PORT_TYPE_10G, 0) |
               phy_encode(PORT_TYPE_10G, 1));
    } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
           (parent->plat_type == PLAT_TYPE_NIU)) {
        /* this is the Monza case */
        if (np->flags & NIU_FLAGS_10G) {
            val = (phy_encode(PORT_TYPE_10G, 0) |
                   phy_encode(PORT_TYPE_10G, 1));
        } else {
            val = (phy_encode(PORT_TYPE_1G, 0) |
                   phy_encode(PORT_TYPE_1G, 1));
        }
    } else {
        err = fill_phy_probe_info(np, parent, info);
        if (err)
            return err;

        num_10g = count_10g_ports(info, &lowest_10g);
        num_1g = count_1g_ports(info, &lowest_1g);

        /* Dispatch on the combined (10G count, 1G count) nibbles. */
        switch ((num_10g << 4) | num_1g) {
        case 0x24:
            if (lowest_1g == 10)
                parent->plat_type = PLAT_TYPE_VF_P0;
            else if (lowest_1g == 26)
                parent->plat_type = PLAT_TYPE_VF_P1;
            else
                goto unknown_vg_1g_port;

            /* fallthru */
        case 0x22:
            val = (phy_encode(PORT_TYPE_10G, 0) |
                   phy_encode(PORT_TYPE_10G, 1) |
                   phy_encode(PORT_TYPE_1G, 2) |
                   phy_encode(PORT_TYPE_1G, 3));
            break;

        case 0x20:
            val = (phy_encode(PORT_TYPE_10G, 0) |
                   phy_encode(PORT_TYPE_10G, 1));
            break;

        case 0x10:
            val = phy_encode(PORT_TYPE_10G, np->port);
            break;

        case 0x14:
            if (lowest_1g == 10)
                parent->plat_type = PLAT_TYPE_VF_P0;
            else if (lowest_1g == 26)
                parent->plat_type = PLAT_TYPE_VF_P1;
            else
                goto unknown_vg_1g_port;

            /* fallthru */
        case 0x13:
            if ((lowest_10g & 0x7) == 0)
                val = (phy_encode(PORT_TYPE_10G, 0) |
                       phy_encode(PORT_TYPE_1G, 1) |
                       phy_encode(PORT_TYPE_1G, 2) |
                       phy_encode(PORT_TYPE_1G, 3));
            else
                val = (phy_encode(PORT_TYPE_1G, 0) |
                       phy_encode(PORT_TYPE_10G, 1) |
                       phy_encode(PORT_TYPE_1G, 2) |
                       phy_encode(PORT_TYPE_1G, 3));
            break;

        case 0x04:
            if (lowest_1g == 10)
                parent->plat_type = PLAT_TYPE_VF_P0;
            else if (lowest_1g == 26)
                parent->plat_type = PLAT_TYPE_VF_P1;
            else
                goto unknown_vg_1g_port;

            val = (phy_encode(PORT_TYPE_1G, 0) |
                   phy_encode(PORT_TYPE_1G, 1) |
                   phy_encode(PORT_TYPE_1G, 2) |
                   phy_encode(PORT_TYPE_1G, 3));
            break;

        default:
            pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
                   num_10g, num_1g);
            return -EINVAL;
        }
    }

    parent->port_phy = val;

    if (parent->plat_type == PLAT_TYPE_NIU)
        niu_n2_divide_channels(parent);
    else
        niu_divide_channels(parent, num_10g, num_1g);

    niu_divide_rdc_groups(parent, num_10g, num_1g);

    return 0;

unknown_vg_1g_port:
    pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
    return -EINVAL;
}

/* One-time port-layout discovery; afterwards mask all LDN interrupts. */
static int niu_probe_ports(struct niu *np)
{
    struct niu_parent *parent = np->parent;
    int err, i;

    if (parent->port_phy == PORT_PHY_UNKNOWN) {
        err = walk_phys(np, parent);
        if (err)
            return err;

        niu_set_ldg_timer_res(np, 2);
        for (i = 0; i <= LDN_MAX; i++)
            niu_ldn_irq_enable(np, i, 0);
    }

    if (parent->port_phy == PORT_PHY_INVALID)
        return -EINVAL;

    return 0;
}

static int
niu_classifier_swstate_init(struct niu *np)
{
    struct niu_classifier *cp = &np->clas;

    /* Each port gets an equal share of the chip's TCAM entries. */
    cp->tcam_top = (u16) np->port;
    cp->tcam_sz = np->parent->tcam_num_entries /
        np->parent->num_ports;
    cp->h1_init = 0xffffffff;
    cp->h2_init = 0xffff;

    return fflp_early_init(np);
}

/* Set default link parameters: advertise everything, autoneg on,
 * loopback disabled (the #if 0 branch is a MAC-loopback debug aid).
 */
static void niu_link_config_init(struct niu *np)
{
    struct niu_link_config *lp = &np->link_config;

    lp->advertising = (ADVERTISED_10baseT_Half |
               ADVERTISED_10baseT_Full |
               ADVERTISED_100baseT_Half |
               ADVERTISED_100baseT_Full |
               ADVERTISED_1000baseT_Half |
               ADVERTISED_1000baseT_Full |
               ADVERTISED_10000baseT_Full |
               ADVERTISED_Autoneg);
    lp->speed = lp->active_speed = SPEED_INVALID;
    lp->duplex = DUPLEX_FULL;
    lp->active_duplex = DUPLEX_INVALID;
    lp->autoneg = 1;
#if 0
    lp->loopback_mode = LOOPBACK_MAC;
    lp->active_speed = SPEED_10000;
    lp->active_duplex = DUPLEX_FULL;
#else
    lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}

/* Compute per-port register block offsets: ports 0/1 use XMAC (with
 * XPCS), ports 2/3 use BMAC (no XPCS, marked ~0UL).
 */
static int niu_init_mac_ipp_pcs_base(struct niu *np)
{
    switch (np->port) {
    case 0:
        np->mac_regs = np->regs + XMAC_PORT0_OFF;
        np->ipp_off = 0x00000;
        np->pcs_off = 0x04000;
        np->xpcs_off = 0x02000;
        break;

    case 1:
        np->mac_regs = np->regs + XMAC_PORT1_OFF;
        np->ipp_off = 0x08000;
        np->pcs_off = 0x0a000;
        np->xpcs_off = 0x08000;
        break;

    case 2:
        np->mac_regs = np->regs + BMAC_PORT2_OFF;
        np->ipp_off = 0x04000;
        np->pcs_off = 0x0e000;
        np->xpcs_off = ~0UL;
        break;

    case 3:
        np->mac_regs = np->regs + BMAC_PORT3_OFF;
        np->ipp_off = 0x0c000;
        np->pcs_off = 0x12000;
        np->xpcs_off = ~0UL;
        break;

    default:
        dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
        return -EINVAL;
    }

    return 0;
}

/* Try to enable MSI-X; on partial success retry with the granted vector
 * count, on failure fall back to the legacy interrupt (flag cleared).
 */
static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
{
    struct msix_entry msi_vec[NIU_NUM_LDG];
    struct niu_parent *parent = np->parent;
    struct pci_dev *pdev = np->pdev;
    int i, num_irqs, err;
    u8 first_ldg;

    first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
    for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
        ldg_num_map[i] = first_ldg + i;

    /* rx + tx channels, plus MAC; port 0 also owns MIF and SYSERR. */
    num_irqs = (parent->rxchan_per_port[np->port] +
            parent->txchan_per_port[np->port] +
            (np->port == 0 ? 3 : 1));
    BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));

retry:
    for (i = 0; i < num_irqs; i++) {
        msi_vec[i].vector = 0;
        msi_vec[i].entry = i;
    }

    err = pci_enable_msix(pdev, msi_vec, num_irqs);
    if (err < 0) {
        np->flags &= ~NIU_FLAGS_MSIX;
        return;
    }
    if (err > 0) {
        /* Fewer vectors available than requested; retry with that. */
        num_irqs = err;
        goto retry;
    }

    np->flags |= NIU_FLAGS_MSIX;
    for (i = 0; i < num_irqs; i++)
        np->ldg[i].irq = msi_vec[i].vector;
    np->num_ldg = num_irqs;
}

/* N2 platform: take LDG numbers and IRQs from the OF "interrupts"
 * property (SPARC64 only; -EINVAL elsewhere).
 */
static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
{
#ifdef CONFIG_SPARC64
    struct platform_device *op = np->op;
    const u32 *int_prop;
    int i;

    int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
    if (!int_prop)
        return -ENODEV;

    for (i = 0; i < op->archdata.num_irqs; i++) {
        ldg_num_map[i] = int_prop[i];
        np->ldg[i].irq = op->archdata.irqs[i];
    }

    np->num_ldg = op->archdata.num_irqs;

    return 0;
#else
    return -EINVAL;
#endif
}

/* Set up logical device groups: NAPI contexts, SID routing (non-N2),
 * then assign each LDN to an LDG round-robin.
 */
static int niu_ldg_init(struct niu *np)
{
    struct niu_parent *parent = np->parent;
    u8 ldg_num_map[NIU_NUM_LDG];
    int first_chan, num_chan;
    int i, err, ldg_rotor;
    u8 port;

    np->num_ldg = 1;
    np->ldg[0].irq = np->dev->irq;
    if (parent->plat_type == PLAT_TYPE_NIU) {
        err = niu_n2_irq_init(np, ldg_num_map);
        if (err)
            return err;
    } else
        niu_try_msix(np, ldg_num_map);

    port = np->port;
    for (i = 0; i < np->num_ldg; i++) {
        struct niu_ldg *lp = &np->ldg[i];

        netif_napi_add(np->dev, &lp->napi, niu_poll, 64);

        lp->np = np;
        lp->ldg_num = ldg_num_map[i];
        lp->timer = 2; /* XXX */

        /* On N2 NIU the firmware has setup the SID mappings so they go
         * to the correct values that will route the LDG to the proper
         * interrupt in the NCU interrupt table.
         */
        if (np->parent->plat_type != PLAT_TYPE_NIU) {
            err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
            if (err)
                return err;
        }
    }

    /* We adopt the LDG assignment ordering used by the N2 NIU
     * 'interrupt' properties because that simplifies a lot of
     * things.
This ordering is:
     *
     *    MAC
     *    MIF    (if port zero)
     *    SYSERR (if port zero)
     *    RX channels
     *    TX channels
     */

    ldg_rotor = 0;

    err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
                 LDN_MAC(port));
    if (err)
        return err;

    ldg_rotor++;
    if (ldg_rotor == np->num_ldg)
        ldg_rotor = 0;

    if (port == 0) {
        err = niu_ldg_assign_ldn(np, parent,
                     ldg_num_map[ldg_rotor],
                     LDN_MIF);
        if (err)
            return err;

        ldg_rotor++;
        if (ldg_rotor == np->num_ldg)
            ldg_rotor = 0;

        err = niu_ldg_assign_ldn(np, parent,
                     ldg_num_map[ldg_rotor],
                     LDN_DEVICE_ERROR);
        if (err)
            return err;

        ldg_rotor++;
        if (ldg_rotor == np->num_ldg)
            ldg_rotor = 0;
    }

    /* This port's RX channels start after all earlier ports' channels. */
    first_chan = 0;
    for (i = 0; i < port; i++)
        first_chan += parent->rxchan_per_port[i];
    num_chan = parent->rxchan_per_port[port];

    for (i = first_chan; i < (first_chan + num_chan); i++) {
        err = niu_ldg_assign_ldn(np, parent,
                     ldg_num_map[ldg_rotor],
                     LDN_RXDMA(i));
        if (err)
            return err;
        ldg_rotor++;
        if (ldg_rotor == np->num_ldg)
            ldg_rotor = 0;
    }

    /* Likewise for the TX channel range. */
    first_chan = 0;
    for (i = 0; i < port; i++)
        first_chan += parent->txchan_per_port[i];
    num_chan = parent->txchan_per_port[port];
    for (i = first_chan; i < (first_chan + num_chan); i++) {
        err = niu_ldg_assign_ldn(np, parent,
                     ldg_num_map[ldg_rotor],
                     LDN_TXDMA(i));
        if (err)
            return err;
        ldg_rotor++;
        if (ldg_rotor == np->num_ldg)
            ldg_rotor = 0;
    }

    return 0;
}

/* Release MSI-X vectors if they were enabled by niu_try_msix(). */
static void niu_ldg_free(struct niu *np)
{
    if (np->flags & NIU_FLAGS_MSIX)
        pci_disable_msix(np->pdev);
}

/* Read phy-type, MAC address and model from the OpenFirmware device
 * node (SPARC64 only).  Returns -ENODEV for phy-type "none",
 * -EINVAL on malformed properties, 0 on success.
 */
static int niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
    struct net_device *dev = np->dev;
    struct device_node *dp;
    const char *phy_type;
    const u8 *mac_addr;
    const char *model;
    int prop_len;

    if (np->parent->plat_type == PLAT_TYPE_NIU)
        dp = np->op->dev.of_node;
    else
        dp = pci_device_to_OF_node(np->pdev);

    phy_type = of_get_property(dp, "phy-type", &prop_len);
    if (!phy_type) {
        netdev_err(dev, "%s: OF node lacks phy-type property\n",
               dp->full_name);
        return -EINVAL;
    }

    if (!strcmp(phy_type, "none"))
        return -ENODEV;

    strcpy(np->vpd.phy_type, phy_type);

    if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
        netdev_err(dev, "%s: Illegal phy string [%s]\n",
               dp->full_name, np->vpd.phy_type);
        return -EINVAL;
    }

    mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
    if (!mac_addr) {
        netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
               dp->full_name);
        return -EINVAL;
    }
    /* NOTE(review): length mismatch is only logged, not treated as
     * fatal; the copy below still proceeds with dev->addr_len bytes.
     */
    if (prop_len != dev->addr_len) {
        netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
               dp->full_name, prop_len);
    }
    memcpy(dev->dev_addr, mac_addr, dev->addr_len);
    if (!is_valid_ether_addr(&dev->dev_addr[0])) {
        netdev_err(dev, "%s: OF MAC address is invalid\n",
               dp->full_name);
        netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
        return -EINVAL;
    }

    model = of_get_property(dp, "model", &prop_len);

    if (model)
        strcpy(np->vpd.model, model);

    if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
        np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
            NIU_FLAGS_HOTPLUG_PHY);
    }

    return 0;
#else
    return -EINVAL;
#endif
}

/* Gather chip configuration from OF properties, VPD or SPROM, then run
 * the per-port software-state initializers and bring up the link.
 */
static int niu_get_invariants(struct niu *np)
{
    int err, have_props;
    u32 offset;

    err = niu_get_of_props(np);
    if (err == -ENODEV)
        return err;

    have_props = !err;

    err = niu_init_mac_ipp_pcs_base(np);
    if (err)
        return err;

    if (have_props) {
        err = niu_get_and_validate_port(np);
        if (err)
            return err;
    } else {
        if (np->parent->plat_type == PLAT_TYPE_NIU)
            return -EINVAL;

        nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
        offset = niu_pci_vpd_offset(np);
        netif_printk(np, probe, KERN_DEBUG, np->dev,
                 "%s() VPD offset [%08x]\n", __func__, offset);
        if (offset)
            niu_pci_vpd_fetch(np, offset);
        nw64(ESPC_PIO_EN, 0);

        if (np->flags & NIU_FLAGS_VPD_VALID) {
            niu_pci_vpd_validate(np);
            err = niu_get_and_validate_port(np);
            if (err)
                return err;
        }

        if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
            /* No usable VPD: fall back to the serial EEPROM. */
            err = niu_get_and_validate_port(np);
            if (err)
                return err;
            err = niu_pci_probe_sprom(np);
            if (err)
                return err;
        }
    }

    err = niu_probe_ports(np);
    if (err)
        return err;

    niu_ldg_init(np);
    niu_classifier_swstate_init(np);
    niu_link_config_init(np);

    err = niu_determine_phy_disposition(np);
    if (!err)
        err = niu_init_link(np);

    return err;
}

/* Registry of shared per-chip "parent" state, keyed by bus identity. */
static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;

/* sysfs: space-separated "10G"/"1G" type of each port. */
static ssize_t show_port_phy(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct platform_device *plat_dev = to_platform_device(dev);
    struct niu_parent *p = plat_dev->dev.platform_data;
    u32 port_phy = p->port_phy;
    char *orig_buf = buf;
    int i;

    if (port_phy == PORT_PHY_UNKNOWN ||
        port_phy == PORT_PHY_INVALID)
        return 0;

    for (i = 0; i < p->num_ports; i++) {
        const char *type_str;
        int type;

        type = phy_decode(port_phy, i);
        if (type == PORT_TYPE_10G)
            type_str = "10G";
        else
            type_str = "1G";

        buf += sprintf(buf,
                   (i == 0) ? "%s" : " %s",
                   type_str);
    }
    buf += sprintf(buf, "\n");
    return buf - orig_buf;
}

/* sysfs: human-readable platform type of this board. */
static ssize_t show_plat_type(struct device *dev,
                  struct device_attribute *attr, char *buf)
{
    struct platform_device *plat_dev = to_platform_device(dev);
    struct niu_parent *p = plat_dev->dev.platform_data;
    const char *type_str;

    switch (p->plat_type) {
    case PLAT_TYPE_ATLAS:
        type_str = "atlas";
        break;
    case PLAT_TYPE_NIU:
        type_str = "niu";
        break;
    case PLAT_TYPE_VF_P0:
        type_str = "vf_p0";
        break;
    case PLAT_TYPE_VF_P1:
        type_str = "vf_p1";
        break;
    default:
        type_str = "unknown";
        break;
    }

    return sprintf(buf, "%s\n", type_str);
}

/* Shared body for the rxchan/txchan sysfs attributes; @rx selects
 * which per-port channel-count table is printed.
 */
static ssize_t __show_chan_per_port(struct device *dev,
                    struct device_attribute *attr, char *buf,
                    int rx)
{
    struct platform_device *plat_dev = to_platform_device(dev);
    struct niu_parent *p = plat_dev->dev.platform_data;
    char *orig_buf = buf;
    u8 *arr;
    int i;

    arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

    for (i = 0; i < p->num_ports; i++) {
        buf += sprintf(buf,
                   (i == 0) ?
"%d" : " %d", arr[i]); } buf += sprintf(buf, "\n"); return buf - orig_buf; } static ssize_t show_rxchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_txchan_per_port(struct device *dev, struct device_attribute *attr, char *buf) { return __show_chan_per_port(dev, attr, buf, 1); } static ssize_t show_num_ports(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *plat_dev = to_platform_device(dev); struct niu_parent *p = plat_dev->dev.platform_data; return sprintf(buf, "%d\n", p->num_ports); } static struct device_attribute niu_parent_attributes[] = { __ATTR(port_phy, S_IRUGO, show_port_phy, NULL), __ATTR(plat_type, S_IRUGO, show_plat_type, NULL), __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL), __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL), __ATTR(num_ports, S_IRUGO, show_num_ports, NULL), {} }; static struct niu_parent *niu_new_parent(struct niu *np, union niu_parent_id *id, u8 ptype) { struct platform_device *plat_dev; struct niu_parent *p; int i; plat_dev = platform_device_register_simple("niu-board", niu_parent_index, NULL, 0); if (IS_ERR(plat_dev)) return NULL; for (i = 0; attr_name(niu_parent_attributes[i]); i++) { int err = device_create_file(&plat_dev->dev, &niu_parent_attributes[i]); if (err) goto fail_unregister; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) goto fail_unregister; p->index = niu_parent_index++; plat_dev->dev.platform_data = p; p->plat_dev = plat_dev; memcpy(&p->id, id, sizeof(*id)); p->plat_type = ptype; INIT_LIST_HEAD(&p->list); atomic_set(&p->refcnt, 0); list_add(&p->list, &niu_parent_list); spin_lock_init(&p->lock); p->rxdma_clock_divider = 7500; p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES; if (p->plat_type == PLAT_TYPE_NIU) p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES; for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { int index = i - CLASS_CODE_USER_PROG1; 
p->tcam_key[index] = TCAM_KEY_TSEL;
        p->flow_key[index] = (FLOW_KEY_IPSA |
                      FLOW_KEY_IPDA |
                      FLOW_KEY_PROTO |
                      (FLOW_KEY_L4_BYTE12 <<
                       FLOW_KEY_L4_0_SHIFT) |
                      (FLOW_KEY_L4_BYTE12 <<
                       FLOW_KEY_L4_1_SHIFT));
    }

    for (i = 0; i < LDN_MAX + 1; i++)
        p->ldg_map[i] = LDG_INVALID;

    return p;

fail_unregister:
    platform_device_unregister(plat_dev);
    return NULL;
}

/* Look up (or create) the shared parent for this chip and attach this
 * port to it: sysfs "portN" link plus a reference count.
 */
static struct niu_parent *niu_get_parent(struct niu *np,
                     union niu_parent_id *id, u8 ptype)
{
    struct niu_parent *p, *tmp;
    int port = np->port;

    mutex_lock(&niu_parent_lock);
    p = NULL;
    list_for_each_entry(tmp, &niu_parent_list, list) {
        if (!memcmp(id, &tmp->id, sizeof(*id))) {
            p = tmp;
            break;
        }
    }
    if (!p)
        p = niu_new_parent(np, id, ptype);

    if (p) {
        char port_name[6];
        int err;

        sprintf(port_name, "port%d", port);
        err = sysfs_create_link(&p->plat_dev->dev.kobj,
                    &np->device->kobj,
                    port_name);
        if (!err) {
            p->ports[port] = np;
            atomic_inc(&p->refcnt);
        }
    }
    mutex_unlock(&niu_parent_lock);

    return p;
}

/* Detach this port from its parent; destroy the parent when the last
 * port reference goes away.
 */
static void niu_put_parent(struct niu *np)
{
    struct niu_parent *p = np->parent;
    u8 port = np->port;
    char port_name[6];

    BUG_ON(!p || p->ports[port] != np);

    netif_printk(np, probe, KERN_DEBUG, np->dev,
             "%s() port[%u]\n", __func__, port);

    sprintf(port_name, "port%d", port);

    mutex_lock(&niu_parent_lock);

    sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

    p->ports[port] = NULL;
    np->parent = NULL;

    if (atomic_dec_and_test(&p->refcnt)) {
        list_del(&p->list);
        platform_device_unregister(p->plat_dev);
    }

    mutex_unlock(&niu_parent_lock);
}

/* PCI flavor of the niu_ops DMA callbacks: thin wrappers around the
 * generic DMA API, with dma_addr_t widened to u64 for the caller.
 */
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
                    u64 *handle, gfp_t flag)
{
    dma_addr_t dh;
    void *ret;

    ret = dma_alloc_coherent(dev, size, &dh, flag);
    if (ret)
        *handle = dh;
    return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
                  void *cpu_addr, u64 handle)
{
    dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
    return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
                   size_t size, enum dma_data_direction direction)
{
    dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
                  size_t size,
                  enum dma_data_direction direction)
{
    return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
                 size_t size,
                 enum dma_data_direction direction)
{
    dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
    .alloc_coherent = niu_pci_alloc_coherent,
    .free_coherent = niu_pci_free_coherent,
    .map_page = niu_pci_map_page,
    .unmap_page = niu_pci_unmap_page,
    .map_single = niu_pci_map_single,
    .unmap_single = niu_pci_unmap_single,
};

/* Print the driver version banner once. */
static void niu_driver_version(void)
{
    static int niu_version_printed;

    if (niu_version_printed++ == 0)
        pr_info("%s", version);
}

/* Allocate the net_device and initialize the common niu fields shared
 * by the PCI and OF probe paths.
 */
static struct net_device *niu_alloc_and_init(struct device *gen_dev,
                         struct pci_dev *pdev,
                         struct platform_device *op,
                         const struct niu_ops *ops,
                         u8 port)
{
    struct net_device *dev;
    struct niu *np;

    dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
    if (!dev)
        return NULL;

    SET_NETDEV_DEV(dev, gen_dev);

    np = netdev_priv(dev);
    np->dev = dev;
    np->pdev = pdev;
    np->op = op;
    np->device = gen_dev;
    np->ops = ops;
    np->msg_enable = niu_debug;

    spin_lock_init(&np->lock);
    INIT_WORK(&np->reset_task, niu_reset_task);

    np->port = port;

    return dev;
}

static const struct net_device_ops niu_netdev_ops = {
    .ndo_open = niu_open,
    .ndo_stop = niu_close,
    .ndo_start_xmit = niu_start_xmit,
    .ndo_get_stats64 = niu_get_stats,
    .ndo_set_rx_mode = niu_set_rx_mode,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = niu_set_mac_addr,
    .ndo_do_ioctl = niu_ioctl,
    .ndo_tx_timeout = niu_tx_timeout,
    .ndo_change_mtu = niu_change_mtu,
};

static void niu_assign_netdev_ops(struct net_device *dev)
{
    dev->netdev_ops = &niu_netdev_ops;
    dev->ethtool_ops =
&niu_ethtool_ops;
    dev->watchdog_timeo = NIU_TX_TIMEOUT;
}

/* Log a one-shot summary of the port's MAC/mode/transceiver setup. */
static void niu_device_announce(struct niu *np)
{
    struct net_device *dev = np->dev;

    pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

    if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
        pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
                dev->name,
                (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
                (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
                (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
                (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
                 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
                np->vpd.phy_type);
    } else {
        pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
                dev->name,
                (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
                (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
                (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
                 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
                  "COPPER")),
                (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
                 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
                np->vpd.phy_type);
    }
}

static void niu_set_basic_features(struct net_device *dev)
{
    dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
    dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}

/* PCI probe: enable the device, map BAR 0, configure DMA masking
 * (44-bit preferred, 32-bit fallback), fetch chip invariants and
 * register the net device.  Unwinds via the goto chain on error.
 */
static int niu_pci_init_one(struct pci_dev *pdev,
                const struct pci_device_id *ent)
{
    union niu_parent_id parent_id;
    struct net_device *dev;
    struct niu *np;
    int err;
    u64 dma_mask;

    niu_driver_version();

    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
        return err;
    }

    if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
        !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
        dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
        err = -ENODEV;
        goto err_out_disable_pdev;
    }

    err = pci_request_regions(pdev, DRV_MODULE_NAME);
    if (err) {
        dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
        goto err_out_disable_pdev;
    }

    if (!pci_is_pcie(pdev)) {
        dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
        err = -ENODEV;
        goto err_out_free_res;
    }

    dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
                 &niu_pci_ops, PCI_FUNC(pdev->devfn));
    if (!dev) {
        err = -ENOMEM;
        goto err_out_free_res;
    }
    np = netdev_priv(dev);

    memset(&parent_id, 0, sizeof(parent_id));
    parent_id.pci.domain = pci_domain_nr(pdev->bus);
    parent_id.pci.bus = pdev->bus->number;
    parent_id.pci.device = PCI_SLOT(pdev->devfn);

    np->parent = niu_get_parent(np, &parent_id,
                    PLAT_TYPE_ATLAS);
    if (!np->parent) {
        err = -ENOMEM;
        goto err_out_free_dev;
    }

    /* Enable error reporting and relaxed ordering; disable no-snoop. */
    pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
        PCI_EXP_DEVCTL_NOSNOOP_EN,
        PCI_EXP_DEVCTL_CERE |
        PCI_EXP_DEVCTL_NFERE |
        PCI_EXP_DEVCTL_FERE |
        PCI_EXP_DEVCTL_URRE |
        PCI_EXP_DEVCTL_RELAX_EN);

    dma_mask = DMA_BIT_MASK(44);
    err = pci_set_dma_mask(pdev, dma_mask);
    if (!err) {
        dev->features |= NETIF_F_HIGHDMA;
        err = pci_set_consistent_dma_mask(pdev, dma_mask);
        if (err) {
            dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
            goto err_out_release_parent;
        }
    }
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
            goto err_out_release_parent;
        }
    }

    niu_set_basic_features(dev);

    dev->priv_flags |= IFF_UNICAST_FLT;

    np->regs = pci_ioremap_bar(pdev, 0);
    if (!np->regs) {
        dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
        err = -ENOMEM;
        goto err_out_release_parent;
    }

    pci_set_master(pdev);
    pci_save_state(pdev);

    dev->irq = pdev->irq;

    niu_assign_netdev_ops(dev);

    err = niu_get_invariants(np);
    if (err) {
        if (err != -ENODEV)
            dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
        goto err_out_iounmap;
    }

    err = register_netdev(dev);
    if (err) {
        dev_err(&pdev->dev, "Cannot register net device, aborting\n");
        goto err_out_iounmap;
    }

    pci_set_drvdata(pdev, dev);

    niu_device_announce(np);

    return 0;

err_out_iounmap:
    if (np->regs) {
        iounmap(np->regs);
        np->regs = NULL;
    }

err_out_release_parent:
    niu_put_parent(np);

err_out_free_dev:
    free_netdev(dev);

err_out_free_res:
    pci_release_regions(pdev);
err_out_disable_pdev:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);

    return err;
}

/* PCI removal: tear down in reverse order of niu_pci_init_one(). */
static void niu_pci_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        struct niu *np = netdev_priv(dev);

        unregister_netdev(dev);

        if (np->regs) {
            iounmap(np->regs);
            np->regs = NULL;
        }

        niu_ldg_free(np);

        niu_put_parent(np);

        free_netdev(dev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
    }
}

/* Legacy PM suspend: quiesce pending work, mask interrupts, detach the
 * netdev and stop the hardware before saving PCI state.
 */
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct niu *np = netdev_priv(dev);
    unsigned long flags;

    if (!netif_running(dev))
        return 0;

    flush_work(&np->reset_task);
    niu_netif_stop(np);

    del_timer_sync(&np->timer);

    spin_lock_irqsave(&np->lock, flags);
    niu_enable_interrupts(np, 0);
    spin_unlock_irqrestore(&np->lock, flags);

    netif_device_detach(dev);

    spin_lock_irqsave(&np->lock, flags);
    niu_stop_hw(np);
    spin_unlock_irqrestore(&np->lock, flags);

    pci_save_state(pdev);

    return 0;
}

/* Legacy PM resume: restore PCI state, re-init the hardware and
 * restart the maintenance timer.
 */
static int niu_resume(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct niu *np = netdev_priv(dev);
    unsigned long flags;
    int err;

    if (!netif_running(dev))
        return 0;

    pci_restore_state(pdev);

    netif_device_attach(dev);

    spin_lock_irqsave(&np->lock, flags);

    err = niu_init_hw(np);
    if (!err) {
        np->timer.expires = jiffies + HZ;
        add_timer(&np->timer);
        niu_netif_start(np);
    }

    spin_unlock_irqrestore(&np->lock, flags);

    return err;
}

static struct pci_driver niu_pci_driver = {
    .name = DRV_MODULE_NAME,
    .id_table = niu_pci_tbl,
    .probe = niu_pci_init_one,
    .remove = niu_pci_remove_one,
    .suspend = niu_suspend,
    .resume = niu_resume,
};

#ifdef CONFIG_SPARC64
/* Physically-addressed flavor of the niu_ops DMA callbacks used on the
 * N2 NIU (SPARC64): "DMA addresses" are plain physical addresses, so
 * map/unmap are trivial.
 */
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
                     u64 *dma_addr, gfp_t flag)
{
    unsigned long order = get_order(size);
    unsigned long page = __get_free_pages(flag, order);

    if (page == 0UL)
        return NULL;
    memset((char *)page, 0, PAGE_SIZE << order);
    *dma_addr = __pa(page);

    return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
                   void *cpu_addr, u64 handle)
{
    unsigned long order = get_order(size);

    free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size,
                 enum dma_data_direction direction)
{
    return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
                size_t size, enum dma_data_direction direction)
{
    /* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
                   size_t size,
                   enum dma_data_direction direction)
{
    return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
                  size_t size,
                  enum dma_data_direction direction)
{
    /* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
    .alloc_coherent = niu_phys_alloc_coherent,
    .free_coherent = niu_phys_free_coherent,
    .map_page = niu_phys_map_page,
    .unmap_page = niu_phys_unmap_page,
    .map_single = niu_phys_map_single,
    .unmap_single = niu_phys_unmap_single,
};

/* OF probe path for the on-chip N2 NIU: port number comes from the
 * low bit of the "reg" property, registers from op->resource[1..3].
 */
static int niu_of_probe(struct platform_device *op)
{
    union niu_parent_id parent_id;
    struct net_device *dev;
    struct niu *np;
    const u32 *reg;
    int err;

    niu_driver_version();

    reg = of_get_property(op->dev.of_node, "reg", NULL);
    if (!reg) {
        dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
            op->dev.of_node->full_name);
        return -ENODEV;
    }

    dev = niu_alloc_and_init(&op->dev, NULL, op,
                 &niu_phys_ops, reg[0] & 0x1);
    if (!dev) {
        err = -ENOMEM;
        goto err_out;
    }
    np = netdev_priv(dev);

    memset(&parent_id, 0, sizeof(parent_id));
    parent_id.of = of_get_parent(op->dev.of_node);

    np->parent = niu_get_parent(np, &parent_id,
                    PLAT_TYPE_NIU);
    if (!np->parent) {
        err = -ENOMEM;
        goto err_out_free_dev;
    }

    niu_set_basic_features(dev);

    np->regs = of_ioremap(&op->resource[1], 0,
                  resource_size(&op->resource[1]),
                  "niu regs");
    if (!np->regs) {
        dev_err(&op->dev, "Cannot map device registers, aborting\n");
        err = -ENOMEM;
        goto err_out_release_parent;
    }
np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
                    resource_size(&op->resource[2]),
                    "niu vregs-1");
    if (!np->vir_regs_1) {
        dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
        err = -ENOMEM;
        goto err_out_iounmap;
    }

    np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
                    resource_size(&op->resource[3]),
                    "niu vregs-2");
    if (!np->vir_regs_2) {
        dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
        err = -ENOMEM;
        goto err_out_iounmap;
    }

    niu_assign_netdev_ops(dev);

    err = niu_get_invariants(np);
    if (err) {
        if (err != -ENODEV)
            dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
        goto err_out_iounmap;
    }

    err = register_netdev(dev);
    if (err) {
        dev_err(&op->dev, "Cannot register net device, aborting\n");
        goto err_out_iounmap;
    }

    dev_set_drvdata(&op->dev, dev);

    niu_device_announce(np);

    return 0;

err_out_iounmap:
    if (np->vir_regs_1) {
        of_iounmap(&op->resource[2], np->vir_regs_1,
               resource_size(&op->resource[2]));
        np->vir_regs_1 = NULL;
    }

    if (np->vir_regs_2) {
        of_iounmap(&op->resource[3], np->vir_regs_2,
               resource_size(&op->resource[3]));
        np->vir_regs_2 = NULL;
    }

    if (np->regs) {
        of_iounmap(&op->resource[1], np->regs,
               resource_size(&op->resource[1]));
        np->regs = NULL;
    }

err_out_release_parent:
    niu_put_parent(np);

err_out_free_dev:
    free_netdev(dev);

err_out:
    return err;
}

/* OF removal: unmap all three register windows and drop the parent. */
static int niu_of_remove(struct platform_device *op)
{
    struct net_device *dev = dev_get_drvdata(&op->dev);

    if (dev) {
        struct niu *np = netdev_priv(dev);

        unregister_netdev(dev);

        if (np->vir_regs_1) {
            of_iounmap(&op->resource[2], np->vir_regs_1,
                   resource_size(&op->resource[2]));
            np->vir_regs_1 = NULL;
        }

        if (np->vir_regs_2) {
            of_iounmap(&op->resource[3], np->vir_regs_2,
                   resource_size(&op->resource[3]));
            np->vir_regs_2 = NULL;
        }

        if (np->regs) {
            of_iounmap(&op->resource[1], np->regs,
                   resource_size(&op->resource[1]));
            np->regs = NULL;
        }

        niu_ldg_free(np);

        niu_put_parent(np);

        free_netdev(dev);

        dev_set_drvdata(&op->dev, NULL);
    }

    return 0;
}

static const struct of_device_id niu_match[] = {
    {
        .name = "network",
        .compatible = "SUNW,niusl",
    },
    {},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct platform_driver niu_of_driver = {
    .driver = {
        .name = "niu",
        .owner = THIS_MODULE,
        .of_match_table = niu_match,
    },
    .probe = niu_of_probe,
    .remove = niu_of_remove,
};

#endif /* CONFIG_SPARC64 */

/* Module init: register the OF driver first (SPARC64), then the PCI
 * driver; unwind the OF registration if PCI registration fails.
 */
static int __init niu_init(void)
{
    int err = 0;

    BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

    niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
    err = platform_driver_register(&niu_of_driver);
#endif

    if (!err) {
        err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
        if (err)
            platform_driver_unregister(&niu_of_driver);
#endif
    }

    return err;
}

static void __exit niu_exit(void)
{
    pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
    platform_driver_unregister(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);
gpl-2.0
PikkonX/T989_ICS_KERNEL-
drivers/input/touchscreen/max11801_ts.c
2339
7673
/*
 * Driver for MAXI MAX11801 - A Resistive touch screen controller with
 * i2c interface
 *
 * Copyright (C) 2011 Freescale Semiconductor, Inc.
 * Author: Zhang Jiejing <jiejing.zhang@freescale.com>
 *
 * Based on mcs5000_ts.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 */

/*
 * This driver aims to support the series of MAXI touch chips max11801
 * through max11803. The main difference between these 4 chips can be
 * found in the table below:
 * -----------------------------------------------------
 * | CHIP     | AUTO MODE SUPPORT(FIFO) | INTERFACE     |
 * |----------------------------------------------------|
 * | max11800 | YES                     | SPI           |
 * | max11801 | YES                     | I2C           |
 * | max11802 | NO                      | SPI           |
 * | max11803 | NO                      | I2C           |
 * ------------------------------------------------------
 *
 * Currently, this driver only supports max11801.
 *
 * Data Sheet:
 * http://www.maxim-ic.com/datasheet/index.mvp/id/5943
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/bitops.h>

/* Register Address define */
#define GENERNAL_STATUS_REG		0x00
#define GENERNAL_CONF_REG		0x01
#define MESURE_RES_CONF_REG		0x02
#define MESURE_AVER_CONF_REG		0x03
#define ADC_SAMPLE_TIME_CONF_REG	0x04
#define PANEL_SETUPTIME_CONF_REG	0x05
#define DELAY_CONVERSION_CONF_REG	0x06
#define TOUCH_DETECT_PULLUP_CONF_REG	0x07
#define AUTO_MODE_TIME_CONF_REG		0x08 /* only for max11800/max11801 */
#define APERTURE_CONF_REG		0x09 /* only for max11800/max11801 */
#define AUX_MESURE_CONF_REG		0x0a
#define OP_MODE_CONF_REG		0x0b

/* FIFO is found only in max11800 and max11801 */
#define FIFO_RD_CMD			(0x50 << 1)
#define MAX11801_FIFO_INT		(1 << 2)
#define MAX11801_FIFO_OVERFLOW		(1 << 3)

#define XY_BUFSIZE			4
#define XY_BUF_OFFSET			4
#define MAX11801_MAX_X			0xfff
#define MAX11801_MAX_Y			0xfff

#define MEASURE_TAG_OFFSET		2
#define MEASURE_TAG_MASK		(3 << MEASURE_TAG_OFFSET)
#define EVENT_TAG_OFFSET		0
#define EVENT_TAG_MASK			(3 << EVENT_TAG_OFFSET)
#define MEASURE_X_TAG			(0 << MEASURE_TAG_OFFSET)
#define MEASURE_Y_TAG			(1 << MEASURE_TAG_OFFSET)

/* These are the state of touch event state machine */
enum {
	EVENT_INIT,
	EVENT_MIDDLE,
	EVENT_RELEASE,
	EVENT_FIFO_END
};

/* Per-device state: the I2C client and the registered input device. */
struct max11801_data {
	struct i2c_client *client;
	struct input_dev *input_dev;
};

static u8 read_register(struct i2c_client *client, int addr)
{
	/* XXX: The chip ignores LSB of register address */
	return i2c_smbus_read_byte_data(client, addr << 1);
}

static int max11801_write_reg(struct i2c_client *client, int addr, int data)
{
	/* XXX: The chip ignores LSB of register address */
	return i2c_smbus_write_byte_data(client, addr << 1, data);
}

/* Threaded IRQ handler: drain one 4-byte FIFO sample (X, Y and event
 * tags) and forward it to the input subsystem.
 */
static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id)
{
	struct max11801_data *data = dev_id;
	struct i2c_client *client = data->client;
	int status, i, ret;
	u8 buf[XY_BUFSIZE];
	int x = -1;
	int y = -1;

	status = read_register(data->client, GENERNAL_STATUS_REG);

	if (status & (MAX11801_FIFO_INT | MAX11801_FIFO_OVERFLOW)) {
		status = read_register(data->client, GENERNAL_STATUS_REG);

		ret = i2c_smbus_read_i2c_block_data(client, FIFO_RD_CMD,
						    XY_BUFSIZE, buf);

		/*
		 * We should get 4 bytes buffer that contains X,Y
		 * and event tag
		 */
		if (ret < XY_BUFSIZE)
			goto out;

		/* Each half of the buffer holds one 12-bit measurement;
		 * the tag bits say whether it is the X or Y axis.
		 */
		for (i = 0; i < XY_BUFSIZE; i += XY_BUFSIZE / 2) {
			if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_X_TAG)
				x = (buf[i] << XY_BUF_OFFSET) +
				    (buf[i + 1] >> XY_BUF_OFFSET);
			else if ((buf[i + 1] & MEASURE_TAG_MASK) == MEASURE_Y_TAG)
				y = (buf[i] << XY_BUF_OFFSET) +
				    (buf[i + 1] >> XY_BUF_OFFSET);
		}

		/* X and Y must belong to the same touch event. */
		if ((buf[1] & EVENT_TAG_MASK) != (buf[3] & EVENT_TAG_MASK))
			goto out;

		switch (buf[1] & EVENT_TAG_MASK) {
		case EVENT_INIT:
			/* fall through */
		case EVENT_MIDDLE:
			input_report_abs(data->input_dev, ABS_X, x);
			input_report_abs(data->input_dev, ABS_Y, y);
			input_event(data->input_dev, EV_KEY, BTN_TOUCH, 1);
input_sync(data->input_dev); break; case EVENT_RELEASE: input_event(data->input_dev, EV_KEY, BTN_TOUCH, 0); input_sync(data->input_dev); break; case EVENT_FIFO_END: break; } } out: return IRQ_HANDLED; } static void __devinit max11801_ts_phy_init(struct max11801_data *data) { struct i2c_client *client = data->client; /* Average X,Y, take 16 samples, average eight media sample */ max11801_write_reg(client, MESURE_AVER_CONF_REG, 0xff); /* X,Y panel setup time set to 20us */ max11801_write_reg(client, PANEL_SETUPTIME_CONF_REG, 0x11); /* Rough pullup time (2uS), Fine pullup time (10us) */ max11801_write_reg(client, TOUCH_DETECT_PULLUP_CONF_REG, 0x10); /* Auto mode init period = 5ms , scan period = 5ms*/ max11801_write_reg(client, AUTO_MODE_TIME_CONF_REG, 0xaa); /* Aperture X,Y set to +- 4LSB */ max11801_write_reg(client, APERTURE_CONF_REG, 0x33); /* Enable Power, enable Automode, enable Aperture, enable Average X,Y */ max11801_write_reg(client, OP_MODE_CONF_REG, 0x36); } static int __devinit max11801_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max11801_data *data; struct input_dev *input_dev; int error; data = kzalloc(sizeof(struct max11801_data), GFP_KERNEL); input_dev = input_allocate_device(); if (!data || !input_dev) { dev_err(&client->dev, "Failed to allocate memory\n"); error = -ENOMEM; goto err_free_mem; } data->client = client; data->input_dev = input_dev; input_dev->name = "max11801_ts"; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, 0, MAX11801_MAX_X, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, MAX11801_MAX_Y, 0, 0); input_set_drvdata(input_dev, data); max11801_ts_phy_init(data); error = request_threaded_irq(client->irq, NULL, max11801_ts_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "max11801_ts", data); if (error) { dev_err(&client->dev, "Failed to 
register interrupt\n"); goto err_free_mem; } error = input_register_device(data->input_dev); if (error) goto err_free_irq; i2c_set_clientdata(client, data); return 0; err_free_irq: free_irq(client->irq, data); err_free_mem: input_free_device(input_dev); kfree(data); return error; } static __devexit int max11801_ts_remove(struct i2c_client *client) { struct max11801_data *data = i2c_get_clientdata(client); free_irq(client->irq, data); input_unregister_device(data->input_dev); kfree(data); return 0; } static const struct i2c_device_id max11801_ts_id[] = { {"max11801", 0}, { } }; MODULE_DEVICE_TABLE(i2c, max11801_ts_id); static struct i2c_driver max11801_ts_driver = { .driver = { .name = "max11801_ts", .owner = THIS_MODULE, }, .id_table = max11801_ts_id, .probe = max11801_ts_probe, .remove = __devexit_p(max11801_ts_remove), }; static int __init max11801_ts_init(void) { return i2c_add_driver(&max11801_ts_driver); } static void __exit max11801_ts_exit(void) { i2c_del_driver(&max11801_ts_driver); } module_init(max11801_ts_init); module_exit(max11801_ts_exit); MODULE_AUTHOR("Zhang Jiejing <jiejing.zhang@freescale.com>"); MODULE_DESCRIPTION("Touchscreen driver for MAXI MAX11801 controller"); MODULE_LICENSE("GPL");
gpl-2.0
faux123/private_msm8660_ics
drivers/media/dvb/frontends/zl10036.c
3107
12236
/** * Driver for Zarlink zl10036 DVB-S silicon tuner * * Copyright (C) 2006 Tino Reichardt * Copyright (C) 2007-2009 Matthias Schwarzott <zzam@gentoo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ** * The data sheet for this tuner can be found at: * http://www.mcmilk.de/projects/dvb-card/datasheets/ZL10036.pdf * * This one is working: (at my Avermedia DVB-S Pro) * - zl10036 (40pin, FTA) * * A driver for zl10038 should be very similar. */ #include <linux/module.h> #include <linux/dvb/frontend.h> #include <linux/slab.h> #include <linux/types.h> #include "zl10036.h" static int zl10036_debug; #define dprintk(level, args...) \ do { if (zl10036_debug & level) printk(KERN_DEBUG "zl10036: " args); \ } while (0) #define deb_info(args...) dprintk(0x01, args) #define deb_i2c(args...) 
dprintk(0x02, args) struct zl10036_state { struct i2c_adapter *i2c; const struct zl10036_config *config; u32 frequency; u8 br, bf; }; /* This driver assumes the tuner is driven by a 10.111MHz Cristal */ #define _XTAL 10111 /* Some of the possible dividers: * 64, (write 0x05 to reg), freq step size 158kHz * 10, (write 0x0a to reg), freq step size 1.011kHz (used here) * 5, (write 0x09 to reg), freq step size 2.022kHz */ #define _RDIV 10 #define _RDIV_REG 0x0a #define _FR (_XTAL/_RDIV) #define STATUS_POR 0x80 /* Power on Reset */ #define STATUS_FL 0x40 /* Frequency & Phase Lock */ /* read/write for zl10036 and zl10038 */ static int zl10036_read_status_reg(struct zl10036_state *state) { u8 status; struct i2c_msg msg[1] = { { .addr = state->config->tuner_address, .flags = I2C_M_RD, .buf = &status, .len = sizeof(status) }, }; if (i2c_transfer(state->i2c, msg, 1) != 1) { printk(KERN_ERR "%s: i2c read failed at addr=%02x\n", __func__, state->config->tuner_address); return -EIO; } deb_i2c("R(status): %02x [FL=%d]\n", status, (status & STATUS_FL) ? 1 : 0); if (status & STATUS_POR) deb_info("%s: Power-On-Reset bit enabled - " "need to initialize the tuner\n", __func__); return status; } static int zl10036_write(struct zl10036_state *state, u8 buf[], u8 count) { struct i2c_msg msg[1] = { { .addr = state->config->tuner_address, .flags = 0, .buf = buf, .len = count }, }; u8 reg = 0; int ret; if (zl10036_debug & 0x02) { /* every 8bit-value satisifes this! 
* so only check for debug log */ if ((buf[0] & 0x80) == 0x00) reg = 2; else if ((buf[0] & 0xc0) == 0x80) reg = 4; else if ((buf[0] & 0xf0) == 0xc0) reg = 6; else if ((buf[0] & 0xf0) == 0xd0) reg = 8; else if ((buf[0] & 0xf0) == 0xe0) reg = 10; else if ((buf[0] & 0xf0) == 0xf0) reg = 12; deb_i2c("W(%d):", reg); { int i; for (i = 0; i < count; i++) printk(KERN_CONT " %02x", buf[i]); printk(KERN_CONT "\n"); } } ret = i2c_transfer(state->i2c, msg, 1); if (ret != 1) { printk(KERN_ERR "%s: i2c error, ret=%d\n", __func__, ret); return -EIO; } return 0; } static int zl10036_release(struct dvb_frontend *fe) { struct zl10036_state *state = fe->tuner_priv; fe->tuner_priv = NULL; kfree(state); return 0; } static int zl10036_sleep(struct dvb_frontend *fe) { struct zl10036_state *state = fe->tuner_priv; u8 buf[] = { 0xf0, 0x80 }; /* regs 12/13 */ int ret; deb_info("%s\n", __func__); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_write(state, buf, sizeof(buf)); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } /** * register map of the ZL10036/ZL10038 * * reg[default] content * 2[0x00]: 0 | N14 | N13 | N12 | N11 | N10 | N9 | N8 * 3[0x00]: N7 | N6 | N5 | N4 | N3 | N2 | N1 | N0 * 4[0x80]: 1 | 0 | RFG | BA1 | BA0 | BG1 | BG0 | LEN * 5[0x00]: P0 | C1 | C0 | R4 | R3 | R2 | R1 | R0 * 6[0xc0]: 1 | 1 | 0 | 0 | RSD | 0 | 0 | 0 * 7[0x20]: P1 | BF6 | BF5 | BF4 | BF3 | BF2 | BF1 | 0 * 8[0xdb]: 1 | 1 | 0 | 1 | 0 | CC | 1 | 1 * 9[0x30]: VSD | V2 | V1 | V0 | S3 | S2 | S1 | S0 * 10[0xe1]: 1 | 1 | 1 | 0 | 0 | LS2 | LS1 | LS0 * 11[0xf5]: WS | WH2 | WH1 | WH0 | WL2 | WL1 | WL0 | WRE * 12[0xf0]: 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 * 13[0x28]: PD | BR4 | BR3 | BR2 | BR1 | BR0 | CLR | TL */ static int zl10036_set_frequency(struct zl10036_state *state, u32 frequency) { u8 buf[2]; u32 div, foffset; div = (frequency + _FR/2) / _FR; state->frequency = div * _FR; foffset = frequency - state->frequency; buf[0] = (div >> 8) & 
0x7f; buf[1] = (div >> 0) & 0xff; deb_info("%s: ftodo=%u fpriv=%u ferr=%d div=%u\n", __func__, frequency, state->frequency, foffset, div); return zl10036_write(state, buf, sizeof(buf)); } static int zl10036_set_bandwidth(struct zl10036_state *state, u32 fbw) { /* fbw is measured in kHz */ u8 br, bf; int ret; u8 buf_bf[] = { 0xc0, 0x00, /* 6/7: rsd=0 bf=0 */ }; u8 buf_br[] = { 0xf0, 0x00, /* 12/13: br=0xa clr=0 tl=0*/ }; u8 zl10036_rsd_off[] = { 0xc8 }; /* set RSD=1 */ /* ensure correct values */ if (fbw > 35000) fbw = 35000; if (fbw < 8000) fbw = 8000; #define _BR_MAXIMUM (_XTAL/575) /* _XTAL / 575kHz = 17 */ /* <= 28,82 MHz */ if (fbw <= 28820) { br = _BR_MAXIMUM; } else { /** * f(bw)=34,6MHz f(xtal)=10.111MHz * br = (10111/34600) * 63 * 1/K = 14; */ br = ((_XTAL * 21 * 1000) / (fbw * 419)); } /* ensure correct values */ if (br < 4) br = 4; if (br > _BR_MAXIMUM) br = _BR_MAXIMUM; /* * k = 1.257 * bf = fbw/_XTAL * br * k - 1 */ bf = (fbw * br * 1257) / (_XTAL * 1000) - 1; /* ensure correct values */ if (bf > 62) bf = 62; buf_bf[1] = (bf << 1) & 0x7e; buf_br[1] = (br << 2) & 0x7c; deb_info("%s: BW=%d br=%u bf=%u\n", __func__, fbw, br, bf); if (br != state->br) { ret = zl10036_write(state, buf_br, sizeof(buf_br)); if (ret < 0) return ret; } if (bf != state->bf) { ret = zl10036_write(state, buf_bf, sizeof(buf_bf)); if (ret < 0) return ret; /* time = br/(32* fxtal) */ /* minimal sleep time to be calculated * maximum br is 63 -> max time = 2 /10 MHz = 2e-7 */ msleep(1); ret = zl10036_write(state, zl10036_rsd_off, sizeof(zl10036_rsd_off)); if (ret < 0) return ret; } state->br = br; state->bf = bf; return 0; } static int zl10036_set_gain_params(struct zl10036_state *state, int c) { u8 buf[2]; u8 rfg, ba, bg; /* default values */ rfg = 0; /* enable when using an lna */ ba = 1; bg = 1; /* reg 4 */ buf[0] = 0x80 | ((rfg << 5) & 0x20) | ((ba << 3) & 0x18) | ((bg << 1) & 0x06); if (!state->config->rf_loop_enable) buf[0] |= 0x01; /* P0=0 */ buf[1] = _RDIV_REG | ((c << 5) & 
0x60); deb_info("%s: c=%u rfg=%u ba=%u bg=%u\n", __func__, c, rfg, ba, bg); return zl10036_write(state, buf, sizeof(buf)); } static int zl10036_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct zl10036_state *state = fe->tuner_priv; int ret = 0; u32 frequency = params->frequency; u32 fbw; int i; u8 c; /* ensure correct values * maybe redundant as core already checks this */ if ((frequency < fe->ops.info.frequency_min) || (frequency > fe->ops.info.frequency_max)) return -EINVAL; /** * alpha = 1.35 for dvb-s * fBW = (alpha*symbolrate)/(2*0.8) * 1.35 / (2*0.8) = 27 / 32 */ fbw = (27 * params->u.qpsk.symbol_rate) / 32; /* scale to kHz */ fbw /= 1000; /* Add safe margin of 3MHz */ fbw += 3000; /* setting the charge pump - guessed values */ if (frequency < 950000) return -EINVAL; else if (frequency < 1250000) c = 0; else if (frequency < 1750000) c = 1; else if (frequency < 2175000) c = 2; else return -EINVAL; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_set_gain_params(state, c); if (ret < 0) goto error; ret = zl10036_set_frequency(state, params->frequency); if (ret < 0) goto error; ret = zl10036_set_bandwidth(state, fbw); if (ret < 0) goto error; /* wait for tuner lock - no idea if this is really needed */ for (i = 0; i < 20; i++) { ret = zl10036_read_status_reg(state); if (ret < 0) goto error; /* check Frequency & Phase Lock Bit */ if (ret & STATUS_FL) break; msleep(10); } error: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static int zl10036_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct zl10036_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int zl10036_init_regs(struct zl10036_state *state) { int ret; int i; /* could also be one block from reg 2 to 13 and additional 10/11 */ u8 zl10036_init_tab[][2] = { { 0x04, 0x00 }, /* 2/3: div=0x400 - arbitrary value */ { 0x8b, _RDIV_REG }, /* 4/5: rfg=0 
ba=1 bg=1 len=? */ /* p0=0 c=0 r=_RDIV_REG */ { 0xc0, 0x20 }, /* 6/7: rsd=0 bf=0x10 */ { 0xd3, 0x40 }, /* 8/9: from datasheet */ { 0xe3, 0x5b }, /* 10/11: lock window level */ { 0xf0, 0x28 }, /* 12/13: br=0xa clr=0 tl=0*/ { 0xe3, 0xf9 }, /* 10/11: unlock window level */ }; /* invalid values to trigger writing */ state->br = 0xff; state->bf = 0xff; if (!state->config->rf_loop_enable) zl10036_init_tab[1][0] |= 0x01; deb_info("%s\n", __func__); for (i = 0; i < ARRAY_SIZE(zl10036_init_tab); i++) { ret = zl10036_write(state, zl10036_init_tab[i], 2); if (ret < 0) return ret; } return 0; } static int zl10036_init(struct dvb_frontend *fe) { struct zl10036_state *state = fe->tuner_priv; int ret = 0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_read_status_reg(state); if (ret < 0) return ret; /* Only init if Power-on-Reset bit is set? */ ret = zl10036_init_regs(state); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ return ret; } static struct dvb_tuner_ops zl10036_tuner_ops = { .info = { .name = "Zarlink ZL10036", .frequency_min = 950000, .frequency_max = 2175000 }, .init = zl10036_init, .release = zl10036_release, .sleep = zl10036_sleep, .set_params = zl10036_set_params, .get_frequency = zl10036_get_frequency, }; struct dvb_frontend *zl10036_attach(struct dvb_frontend *fe, const struct zl10036_config *config, struct i2c_adapter *i2c) { struct zl10036_state *state; int ret; if (!config) { printk(KERN_ERR "%s: no config specified", __func__); return NULL; } state = kzalloc(sizeof(struct zl10036_state), GFP_KERNEL); if (!state) return NULL; state->config = config; state->i2c = i2c; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* open i2c_gate */ ret = zl10036_read_status_reg(state); if (ret < 0) { printk(KERN_ERR "%s: No zl10036 found\n", __func__); goto error; } ret = zl10036_init_regs(state); if (ret < 0) { printk(KERN_ERR "%s: tuner initialization failed\n", __func__); goto error; } if 
(fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* close i2c_gate */ fe->tuner_priv = state; memcpy(&fe->ops.tuner_ops, &zl10036_tuner_ops, sizeof(struct dvb_tuner_ops)); printk(KERN_INFO "%s: tuner initialization (%s addr=0x%02x) ok\n", __func__, fe->ops.tuner_ops.info.name, config->tuner_address); return fe; error: kfree(state); return NULL; } EXPORT_SYMBOL(zl10036_attach); module_param_named(debug, zl10036_debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("DVB ZL10036 driver"); MODULE_AUTHOR("Tino Reichardt"); MODULE_AUTHOR("Matthias Schwarzott"); MODULE_LICENSE("GPL");
gpl-2.0
HTCKernels/One-SV-boost-k2cl
drivers/video/s3c-fb.c
4643
52644
/* linux/drivers/video/s3c-fb.c * * Copyright 2008 Openmoko Inc. * Copyright 2008-2010 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * Samsung SoC Framebuffer driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software FoundatIon. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/fb.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <mach/map.h> #include <plat/regs-fb-v4.h> #include <plat/fb.h> /* This driver will export a number of framebuffer interfaces depending * on the configuration passed in via the platform data. Each fb instance * maps to a hardware window. Currently there is no support for runtime * setting of the alpha-blending functions that each window has, so only * window 0 is actually useful. * * Window 0 is treated specially, it is used for the basis of the LCD * output timings and as the control for the output power-down state. */ /* note, the previous use of <mach/regs-fb.h> to get platform specific data * has been replaced by using the platform device name to pick the correct * configuration data for the system. 
*/ #ifdef CONFIG_FB_S3C_DEBUG_REGWRITE #undef writel #define writel(v, r) do { \ printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \ __raw_writel(v, r); \ } while (0) #endif /* FB_S3C_DEBUG_REGWRITE */ /* irq_flags bits */ #define S3C_FB_VSYNC_IRQ_EN 0 #define VSYNC_TIMEOUT_MSEC 50 struct s3c_fb; #define VALID_BPP(x) (1 << ((x) - 1)) #define OSD_BASE(win, variant) ((variant).osd + ((win) * (variant).osd_stride)) #define VIDOSD_A(win, variant) (OSD_BASE(win, variant) + 0x00) #define VIDOSD_B(win, variant) (OSD_BASE(win, variant) + 0x04) #define VIDOSD_C(win, variant) (OSD_BASE(win, variant) + 0x08) #define VIDOSD_D(win, variant) (OSD_BASE(win, variant) + 0x0C) /** * struct s3c_fb_variant - fb variant information * @is_2443: Set if S3C2443/S3C2416 style hardware. * @nr_windows: The number of windows. * @vidtcon: The base for the VIDTCONx registers * @wincon: The base for the WINxCON registers. * @winmap: The base for the WINxMAP registers. * @keycon: The abse for the WxKEYCON registers. * @buf_start: Offset of buffer start registers. * @buf_size: Offset of buffer size registers. * @buf_end: Offset of buffer end registers. * @osd: The base for the OSD registers. * @palette: Address of palette memory, or 0 if none. * @has_prtcon: Set if has PRTCON register. * @has_shadowcon: Set if has SHADOWCON register. * @has_blendcon: Set if has BLENDCON register. * @has_clksel: Set if VIDCON0 register has CLKSEL bit. * @has_fixvclk: Set if VIDCON1 register has FIXVCLK bits. 
*/ struct s3c_fb_variant { unsigned int is_2443:1; unsigned short nr_windows; unsigned int vidtcon; unsigned short wincon; unsigned short winmap; unsigned short keycon; unsigned short buf_start; unsigned short buf_end; unsigned short buf_size; unsigned short osd; unsigned short osd_stride; unsigned short palette[S3C_FB_MAX_WIN]; unsigned int has_prtcon:1; unsigned int has_shadowcon:1; unsigned int has_blendcon:1; unsigned int has_clksel:1; unsigned int has_fixvclk:1; }; /** * struct s3c_fb_win_variant * @has_osd_c: Set if has OSD C register. * @has_osd_d: Set if has OSD D register. * @has_osd_alpha: Set if can change alpha transparency for a window. * @palette_sz: Size of palette in entries. * @palette_16bpp: Set if palette is 16bits wide. * @osd_size_off: If != 0, supports setting up OSD for a window; the appropriate * register is located at the given offset from OSD_BASE. * @valid_bpp: 1 bit per BPP setting to show valid bits-per-pixel. * * valid_bpp bit x is set if (x+1)BPP is supported. */ struct s3c_fb_win_variant { unsigned int has_osd_c:1; unsigned int has_osd_d:1; unsigned int has_osd_alpha:1; unsigned int palette_16bpp:1; unsigned short osd_size_off; unsigned short palette_sz; u32 valid_bpp; }; /** * struct s3c_fb_driverdata - per-device type driver data for init time. * @variant: The variant information for this driver. * @win: The window information for each window. */ struct s3c_fb_driverdata { struct s3c_fb_variant variant; struct s3c_fb_win_variant *win[S3C_FB_MAX_WIN]; }; /** * struct s3c_fb_palette - palette information * @r: Red bitfield. * @g: Green bitfield. * @b: Blue bitfield. * @a: Alpha bitfield. */ struct s3c_fb_palette { struct fb_bitfield r; struct fb_bitfield g; struct fb_bitfield b; struct fb_bitfield a; }; /** * struct s3c_fb_win - per window private data for each framebuffer. * @windata: The platform data supplied for the window configuration. * @parent: The hardware that this window is part of. 
* @fbinfo: Pointer pack to the framebuffer info for this window. * @varint: The variant information for this window. * @palette_buffer: Buffer/cache to hold palette entries. * @pseudo_palette: For use in TRUECOLOUR modes for entries 0..15/ * @index: The window number of this window. * @palette: The bitfields for changing r/g/b into a hardware palette entry. */ struct s3c_fb_win { struct s3c_fb_pd_win *windata; struct s3c_fb *parent; struct fb_info *fbinfo; struct s3c_fb_palette palette; struct s3c_fb_win_variant variant; u32 *palette_buffer; u32 pseudo_palette[16]; unsigned int index; }; /** * struct s3c_fb_vsync - vsync information * @wait: a queue for processes waiting for vsync * @count: vsync interrupt count */ struct s3c_fb_vsync { wait_queue_head_t wait; unsigned int count; }; /** * struct s3c_fb - overall hardware state of the hardware * @slock: The spinlock protection for this data sturcture. * @dev: The device that we bound to, for printing, etc. * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk. * @lcd_clk: The clk (sclk) feeding pixclk. * @regs: The mapped hardware registers. * @variant: Variant information for this hardware. * @enabled: A bitmask of enabled hardware windows. * @output_on: Flag if the physical output is enabled. * @pdata: The platform configuration data passed with the device. * @windows: The hardware windows that have been claimed. * @irq_no: IRQ line number * @irq_flags: irq flags * @vsync_info: VSYNC-related information (count, queues...) */ struct s3c_fb { spinlock_t slock; struct device *dev; struct clk *bus_clk; struct clk *lcd_clk; void __iomem *regs; struct s3c_fb_variant variant; unsigned char enabled; bool output_on; struct s3c_fb_platdata *pdata; struct s3c_fb_win *windows[S3C_FB_MAX_WIN]; int irq_no; unsigned long irq_flags; struct s3c_fb_vsync vsync_info; }; /** * s3c_fb_validate_win_bpp - validate the bits-per-pixel for this mode. * @win: The device window. * @bpp: The bit depth. 
*/ static bool s3c_fb_validate_win_bpp(struct s3c_fb_win *win, unsigned int bpp) { return win->variant.valid_bpp & VALID_BPP(bpp); } /** * s3c_fb_check_var() - framebuffer layer request to verify a given mode. * @var: The screen information to verify. * @info: The framebuffer device. * * Framebuffer layer call to verify the given information and allow us to * update various information depending on the hardware capabilities. */ static int s3c_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct s3c_fb_win *win = info->par; struct s3c_fb *sfb = win->parent; dev_dbg(sfb->dev, "checking parameters\n"); var->xres_virtual = max(var->xres_virtual, var->xres); var->yres_virtual = max(var->yres_virtual, var->yres); if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) { dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n", win->index, var->bits_per_pixel); return -EINVAL; } /* always ensure these are zero, for drop through cases below */ var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { case 1: case 2: case 4: case 8: if (sfb->variant.palette[win->index] != 0) { /* non palletised, A:1,R:2,G:3,B:2 mode */ var->red.offset = 4; var->green.offset = 2; var->blue.offset = 0; var->red.length = 5; var->green.length = 3; var->blue.length = 2; var->transp.offset = 7; var->transp.length = 1; } else { var->red.offset = 0; var->red.length = var->bits_per_pixel; var->green = var->red; var->blue = var->red; } break; case 19: /* 666 with one bit alpha/transparency */ var->transp.offset = 18; var->transp.length = 1; case 18: var->bits_per_pixel = 32; /* 666 format */ var->red.offset = 12; var->green.offset = 6; var->blue.offset = 0; var->red.length = 6; var->green.length = 6; var->blue.length = 6; break; case 16: /* 16 bpp, 565 format */ var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; break; case 32: case 28: case 25: var->transp.length = var->bits_per_pixel 
- 24; var->transp.offset = 24; /* drop through */ case 24: /* our 24bpp is unpacked, so 32bpp */ var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; default: dev_err(sfb->dev, "invalid bpp\n"); } dev_dbg(sfb->dev, "%s: verified parameters\n", __func__); return 0; } /** * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. * @sfb: The hardware state. * @pixclock: The pixel clock wanted, in picoseconds. * * Given the specified pixel clock, work out the necessary divider to get * close to the output frequency. */ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) { unsigned long clk; unsigned long long tmp; unsigned int result; if (sfb->variant.has_clksel) clk = clk_get_rate(sfb->bus_clk); else clk = clk_get_rate(sfb->lcd_clk); tmp = (unsigned long long)clk; tmp *= pixclk; do_div(tmp, 1000000000UL); result = (unsigned int)tmp / 1000; dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", pixclk, clk, result, clk / result); return result; } /** * s3c_fb_align_word() - align pixel count to word boundary * @bpp: The number of bits per pixel * @pix: The value to be aligned. * * Align the given pixel count so that it will start on an 32bit word * boundary. 
*/ static int s3c_fb_align_word(unsigned int bpp, unsigned int pix) { int pix_per_word; if (bpp > 16) return pix; pix_per_word = (8 * 32) / bpp; return ALIGN(pix, pix_per_word); } /** * vidosd_set_size() - set OSD size for a window * * @win: the window to set OSD size for * @size: OSD size register value */ static void vidosd_set_size(struct s3c_fb_win *win, u32 size) { struct s3c_fb *sfb = win->parent; /* OSD can be set up if osd_size_off != 0 for this window */ if (win->variant.osd_size_off) writel(size, sfb->regs + OSD_BASE(win->index, sfb->variant) + win->variant.osd_size_off); } /** * vidosd_set_alpha() - set alpha transparency for a window * * @win: the window to set OSD size for * @alpha: alpha register value */ static void vidosd_set_alpha(struct s3c_fb_win *win, u32 alpha) { struct s3c_fb *sfb = win->parent; if (win->variant.has_osd_alpha) writel(alpha, sfb->regs + VIDOSD_C(win->index, sfb->variant)); } /** * shadow_protect_win() - disable updating values from shadow registers at vsync * * @win: window to protect registers for * @protect: 1 to protect (disable updates) */ static void shadow_protect_win(struct s3c_fb_win *win, bool protect) { struct s3c_fb *sfb = win->parent; u32 reg; if (protect) { if (sfb->variant.has_prtcon) { writel(PRTCON_PROTECT, sfb->regs + PRTCON); } else if (sfb->variant.has_shadowcon) { reg = readl(sfb->regs + SHADOWCON); writel(reg | SHADOWCON_WINx_PROTECT(win->index), sfb->regs + SHADOWCON); } } else { if (sfb->variant.has_prtcon) { writel(0, sfb->regs + PRTCON); } else if (sfb->variant.has_shadowcon) { reg = readl(sfb->regs + SHADOWCON); writel(reg & ~SHADOWCON_WINx_PROTECT(win->index), sfb->regs + SHADOWCON); } } } /** * s3c_fb_enable() - Set the state of the main LCD output * @sfb: The main framebuffer state. * @enable: The state to set. 
*/ static void s3c_fb_enable(struct s3c_fb *sfb, int enable) { u32 vidcon0 = readl(sfb->regs + VIDCON0); if (enable && !sfb->output_on) pm_runtime_get_sync(sfb->dev); if (enable) { vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F; } else { /* see the note in the framebuffer datasheet about * why you cannot take both of these bits down at the * same time. */ if (vidcon0 & VIDCON0_ENVID) { vidcon0 |= VIDCON0_ENVID; vidcon0 &= ~VIDCON0_ENVID_F; } } writel(vidcon0, sfb->regs + VIDCON0); if (!enable && sfb->output_on) pm_runtime_put_sync(sfb->dev); sfb->output_on = enable; } /** * s3c_fb_set_par() - framebuffer request to set new framebuffer state. * @info: The framebuffer to change. * * Framebuffer layer request to set a new mode for the specified framebuffer */ static int s3c_fb_set_par(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct s3c_fb_win *win = info->par; struct s3c_fb *sfb = win->parent; void __iomem *regs = sfb->regs; void __iomem *buf = regs; int win_no = win->index; u32 alpha = 0; u32 data; u32 pagewidth; int clkdiv; dev_dbg(sfb->dev, "setting framebuffer parameters\n"); pm_runtime_get_sync(sfb->dev); shadow_protect_win(win, 1); switch (var->bits_per_pixel) { case 32: case 24: case 16: case 12: info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 8: if (win->variant.palette_sz >= 256) info->fix.visual = FB_VISUAL_PSEUDOCOLOR; else info->fix.visual = FB_VISUAL_TRUECOLOR; break; case 1: info->fix.visual = FB_VISUAL_MONO01; break; default: info->fix.visual = FB_VISUAL_PSEUDOCOLOR; break; } info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8; info->fix.xpanstep = info->var.xres_virtual > info->var.xres ? 1 : 0; info->fix.ypanstep = info->var.yres_virtual > info->var.yres ? 
1 : 0; /* disable the window whilst we update it */ writel(0, regs + WINCON(win_no)); /* use platform specified window as the basis for the lcd timings */ if (win_no == sfb->pdata->default_win) { clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); data = sfb->pdata->vidcon0; data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); if (clkdiv > 1) data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR; else data &= ~VIDCON0_CLKDIR; /* 1:1 clock */ /* write the timing data to the panel */ if (sfb->variant.is_2443) data |= (1 << 5); writel(data, regs + VIDCON0); s3c_fb_enable(sfb, 1); data = VIDTCON0_VBPD(var->upper_margin - 1) | VIDTCON0_VFPD(var->lower_margin - 1) | VIDTCON0_VSPW(var->vsync_len - 1); writel(data, regs + sfb->variant.vidtcon); data = VIDTCON1_HBPD(var->left_margin - 1) | VIDTCON1_HFPD(var->right_margin - 1) | VIDTCON1_HSPW(var->hsync_len - 1); /* VIDTCON1 */ writel(data, regs + sfb->variant.vidtcon + 4); data = VIDTCON2_LINEVAL(var->yres - 1) | VIDTCON2_HOZVAL(var->xres - 1) | VIDTCON2_LINEVAL_E(var->yres - 1) | VIDTCON2_HOZVAL_E(var->xres - 1); writel(data, regs + sfb->variant.vidtcon + 8); } /* write the buffer address */ /* start and end registers stride is 8 */ buf = regs + win_no * 8; writel(info->fix.smem_start, buf + sfb->variant.buf_start); data = info->fix.smem_start + info->fix.line_length * var->yres; writel(data, buf + sfb->variant.buf_end); pagewidth = (var->xres * var->bits_per_pixel) >> 3; data = VIDW_BUF_SIZE_OFFSET(info->fix.line_length - pagewidth) | VIDW_BUF_SIZE_PAGEWIDTH(pagewidth) | VIDW_BUF_SIZE_OFFSET_E(info->fix.line_length - pagewidth) | VIDW_BUF_SIZE_PAGEWIDTH_E(pagewidth); writel(data, regs + sfb->variant.buf_size + (win_no * 4)); /* write 'OSD' registers to control position of framebuffer */ data = VIDOSDxA_TOPLEFT_X(0) | VIDOSDxA_TOPLEFT_Y(0) | VIDOSDxA_TOPLEFT_X_E(0) | VIDOSDxA_TOPLEFT_Y_E(0); writel(data, regs + VIDOSD_A(win_no, sfb->variant)); data = VIDOSDxB_BOTRIGHT_X(s3c_fb_align_word(var->bits_per_pixel, var->xres - 1)) | 
VIDOSDxB_BOTRIGHT_Y(var->yres - 1) | VIDOSDxB_BOTRIGHT_X_E(s3c_fb_align_word(var->bits_per_pixel, var->xres - 1)) | VIDOSDxB_BOTRIGHT_Y_E(var->yres - 1); writel(data, regs + VIDOSD_B(win_no, sfb->variant)); data = var->xres * var->yres; alpha = VIDISD14C_ALPHA1_R(0xf) | VIDISD14C_ALPHA1_G(0xf) | VIDISD14C_ALPHA1_B(0xf); vidosd_set_alpha(win, alpha); vidosd_set_size(win, data); /* Enable DMA channel for this window */ if (sfb->variant.has_shadowcon) { data = readl(sfb->regs + SHADOWCON); data |= SHADOWCON_CHx_ENABLE(win_no); writel(data, sfb->regs + SHADOWCON); } data = WINCONx_ENWIN; sfb->enabled |= (1 << win->index); /* note, since we have to round up the bits-per-pixel, we end up * relying on the bitfield information for r/g/b/a to work out * exactly which mode of operation is intended. */ switch (var->bits_per_pixel) { case 1: data |= WINCON0_BPPMODE_1BPP; data |= WINCONx_BITSWP; data |= WINCONx_BURSTLEN_4WORD; break; case 2: data |= WINCON0_BPPMODE_2BPP; data |= WINCONx_BITSWP; data |= WINCONx_BURSTLEN_8WORD; break; case 4: data |= WINCON0_BPPMODE_4BPP; data |= WINCONx_BITSWP; data |= WINCONx_BURSTLEN_8WORD; break; case 8: if (var->transp.length != 0) data |= WINCON1_BPPMODE_8BPP_1232; else data |= WINCON0_BPPMODE_8BPP_PALETTE; data |= WINCONx_BURSTLEN_8WORD; data |= WINCONx_BYTSWP; break; case 16: if (var->transp.length != 0) data |= WINCON1_BPPMODE_16BPP_A1555; else data |= WINCON0_BPPMODE_16BPP_565; data |= WINCONx_HAWSWP; data |= WINCONx_BURSTLEN_16WORD; break; case 24: case 32: if (var->red.length == 6) { if (var->transp.length != 0) data |= WINCON1_BPPMODE_19BPP_A1666; else data |= WINCON1_BPPMODE_18BPP_666; } else if (var->transp.length == 1) data |= WINCON1_BPPMODE_25BPP_A1888 | WINCON1_BLD_PIX; else if ((var->transp.length == 4) || (var->transp.length == 8)) data |= WINCON1_BPPMODE_28BPP_A4888 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; else data |= WINCON0_BPPMODE_24BPP_888; data |= WINCONx_WSWP; data |= WINCONx_BURSTLEN_16WORD; break; } /* Enable the 
colour keying for the window below this one */ if (win_no > 0) { u32 keycon0_data = 0, keycon1_data = 0; void __iomem *keycon = regs + sfb->variant.keycon; keycon0_data = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F | WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0); keycon1_data = WxKEYCON1_COLVAL(0xffffff); keycon += (win_no - 1) * 8; writel(keycon0_data, keycon + WKEYCON0); writel(keycon1_data, keycon + WKEYCON1); } writel(data, regs + sfb->variant.wincon + (win_no * 4)); writel(0x0, regs + sfb->variant.winmap + (win_no * 4)); /* Set alpha value width */ if (sfb->variant.has_blendcon) { data = readl(sfb->regs + BLENDCON); data &= ~BLENDCON_NEW_MASK; if (var->transp.length > 4) data |= BLENDCON_NEW_8BIT_ALPHA_VALUE; else data |= BLENDCON_NEW_4BIT_ALPHA_VALUE; writel(data, sfb->regs + BLENDCON); } shadow_protect_win(win, 0); pm_runtime_put_sync(sfb->dev); return 0; } /** * s3c_fb_update_palette() - set or schedule a palette update. * @sfb: The hardware information. * @win: The window being updated. * @reg: The palette index being changed. * @value: The computed palette value. * * Change the value of a palette register, either by directly writing to * the palette (this requires the palette RAM to be disconnected from the * hardware whilst this is in progress) or schedule the update for later. * * At the moment, since we have no VSYNC interrupt support, we simply set * the palette entry directly. 
*/
static void s3c_fb_update_palette(struct s3c_fb *sfb,
				  struct s3c_fb_win *win,
				  unsigned int reg,
				  u32 value)
{
	void __iomem *palreg;
	u32 palcon;

	palreg = sfb->regs + sfb->variant.palette[win->index];

	dev_dbg(sfb->dev, "%s: win %d, reg %d (%p): %08x\n",
		__func__, win->index, reg, palreg, value);

	/* keep a shadow copy of the entry so the window state can be
	 * re-applied later (e.g. on resume) */
	win->palette_buffer[reg] = value;

	palcon = readl(sfb->regs + WPALCON);
	/* disconnect the palette RAM from the display whilst we write it */
	writel(palcon | WPALCON_PAL_UPDATE, sfb->regs + WPALCON);

	/* 16bpp palettes are packed two-to-a-word; others are one word each */
	if (win->variant.palette_16bpp)
		writew(value, palreg + (reg * 2));
	else
		writel(value, palreg + (reg * 4));

	/* restore WPALCON, reconnecting the palette RAM */
	writel(palcon, sfb->regs + WPALCON);
}

/* scale a 16bit colour channel down to the bitfield's width and shift it
 * into position for a packed pixel value */
static inline unsigned int chan_to_field(unsigned int chan,
					 struct fb_bitfield *bf)
{
	chan &= 0xffff;
	chan >>= 16 - bf->length;
	return chan << bf->offset;
}

/**
 * s3c_fb_setcolreg() - framebuffer layer request to change palette.
 * @regno: The palette index to change.
 * @red: The red field for the palette data.
 * @green: The green field for the palette data.
 * @blue: The blue field for the palette data.
 * @trans: The transparency (alpha) field for the palette data.
 * @info: The framebuffer being changed.
 *
 * Returns 0 on success, 1 for an unhandled visual type.
 */
static int s3c_fb_setcolreg(unsigned regno,
			    unsigned red, unsigned green, unsigned blue,
			    unsigned transp, struct fb_info *info)
{
	struct s3c_fb_win *win = info->par;
	struct s3c_fb *sfb = win->parent;
	unsigned int val;

	dev_dbg(sfb->dev, "%s: win %d: %d => rgb=%d/%d/%d\n",
		__func__, win->index, regno, red, green, blue);

	pm_runtime_get_sync(sfb->dev);

	switch (info->fix.visual) {
	case FB_VISUAL_TRUECOLOR:
		/* true-colour, use pseudo-palette */

		if (regno < 16) {
			u32 *pal = info->pseudo_palette;

			val = chan_to_field(red, &info->var.red);
			val |= chan_to_field(green, &info->var.green);
			val |= chan_to_field(blue, &info->var.blue);

			pal[regno] = val;
		}
		break;

	case FB_VISUAL_PSEUDOCOLOR:
		if (regno < win->variant.palette_sz) {
			val = chan_to_field(red, &win->palette.r);
			val |= chan_to_field(green, &win->palette.g);
			val |= chan_to_field(blue, &win->palette.b);

			s3c_fb_update_palette(sfb, win, regno, val);
		}

		break;

	default:
		pm_runtime_put_sync(sfb->dev);
		return 1;	/* unknown type */
	}

	pm_runtime_put_sync(sfb->dev);
	return 0;
}

/**
 * s3c_fb_blank() - blank or unblank the given window
 * @blank_mode: The blank state from FB_BLANK_*
 * @info: The framebuffer to blank.
 *
 * Framebuffer layer request to change the power state.
*/ static int s3c_fb_blank(int blank_mode, struct fb_info *info) { struct s3c_fb_win *win = info->par; struct s3c_fb *sfb = win->parent; unsigned int index = win->index; u32 wincon; dev_dbg(sfb->dev, "blank mode %d\n", blank_mode); pm_runtime_get_sync(sfb->dev); wincon = readl(sfb->regs + sfb->variant.wincon + (index * 4)); switch (blank_mode) { case FB_BLANK_POWERDOWN: wincon &= ~WINCONx_ENWIN; sfb->enabled &= ~(1 << index); /* fall through to FB_BLANK_NORMAL */ case FB_BLANK_NORMAL: /* disable the DMA and display 0x0 (black) */ shadow_protect_win(win, 1); writel(WINxMAP_MAP | WINxMAP_MAP_COLOUR(0x0), sfb->regs + sfb->variant.winmap + (index * 4)); shadow_protect_win(win, 0); break; case FB_BLANK_UNBLANK: shadow_protect_win(win, 1); writel(0x0, sfb->regs + sfb->variant.winmap + (index * 4)); shadow_protect_win(win, 0); wincon |= WINCONx_ENWIN; sfb->enabled |= (1 << index); break; case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: default: pm_runtime_put_sync(sfb->dev); return 1; } shadow_protect_win(win, 1); writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4)); shadow_protect_win(win, 0); /* Check the enabled state to see if we need to be running the * main LCD interface, as if there are no active windows then * it is highly likely that we also do not need to output * anything. */ /* We could do something like the following code, but the current * system of using framebuffer events means that we cannot make * the distinction between just window 0 being inactive and all * the windows being down. * * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0); */ /* we're stuck with this until we can do something about overriding * the power control using the blanking event for a single fb. */ if (index == sfb->pdata->default_win) { shadow_protect_win(win, 1); s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0); shadow_protect_win(win, 0); } pm_runtime_put_sync(sfb->dev); return 0; } /** * s3c_fb_pan_display() - Pan the display. 
*
 * Note that the offsets can be written to the device at any time, as their
 * values are latched at each vsync automatically. This also means that only
 * the last call to this function will have any effect on next vsync, but
 * there is no need to sleep waiting for it to prevent tearing.
 *
 * @var: The screen information to verify.
 * @info: The framebuffer device.
 */
static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct s3c_fb_win *win = info->par;
	struct s3c_fb *sfb = win->parent;
	void __iomem *buf = sfb->regs + win->index * 8;
	unsigned int start_boff, end_boff;

	pm_runtime_get_sync(sfb->dev);

	/* Offset in bytes to the start of the displayed area */
	start_boff = var->yoffset * info->fix.line_length;
	/* X offset depends on the current bpp */
	if (info->var.bits_per_pixel >= 8) {
		start_boff += var->xoffset * (info->var.bits_per_pixel >> 3);
	} else {
		/* sub-byte pixels: convert the x offset to a byte count */
		switch (info->var.bits_per_pixel) {
		case 4:
			start_boff += var->xoffset >> 1;
			break;
		case 2:
			start_boff += var->xoffset >> 2;
			break;
		case 1:
			start_boff += var->xoffset >> 3;
			break;
		default:
			dev_err(sfb->dev, "invalid bpp\n");
			pm_runtime_put_sync(sfb->dev);
			return -EINVAL;
		}
	}
	/* Offset in bytes to the end of the displayed area */
	end_boff = start_boff + info->var.yres * info->fix.line_length;

	/* Temporarily turn off per-vsync update from shadow registers until
	 * both start and end addresses are updated to prevent corruption */
	shadow_protect_win(win, 1);

	writel(info->fix.smem_start + start_boff, buf + sfb->variant.buf_start);
	writel(info->fix.smem_start + end_boff, buf + sfb->variant.buf_end);

	shadow_protect_win(win, 0);

	pm_runtime_put_sync(sfb->dev);
	return 0;
}

/**
 * s3c_fb_enable_irq() - enable framebuffer interrupts
 * @sfb: main hardware state
 */
static void s3c_fb_enable_irq(struct s3c_fb *sfb)
{
	void __iomem *regs = sfb->regs;
	u32 irq_ctrl_reg;

	if (!test_and_set_bit(S3C_FB_VSYNC_IRQ_EN, &sfb->irq_flags)) {
		/* IRQ disabled, enable it */
		irq_ctrl_reg = readl(regs + VIDINTCON0);

		irq_ctrl_reg |= VIDINTCON0_INT_ENABLE;
		irq_ctrl_reg |= VIDINTCON0_INT_FRAME;

		/* deliver the frame interrupt on VSYNC only */
		irq_ctrl_reg &= ~VIDINTCON0_FRAMESEL0_MASK;
		irq_ctrl_reg |= VIDINTCON0_FRAMESEL0_VSYNC;
		irq_ctrl_reg &= ~VIDINTCON0_FRAMESEL1_MASK;
		irq_ctrl_reg |= VIDINTCON0_FRAMESEL1_NONE;

		writel(irq_ctrl_reg, regs + VIDINTCON0);
	}
}

/**
 * s3c_fb_disable_irq() - disable framebuffer interrupts
 * @sfb: main hardware state
 */
static void s3c_fb_disable_irq(struct s3c_fb *sfb)
{
	void __iomem *regs = sfb->regs;
	u32 irq_ctrl_reg;

	if (test_and_clear_bit(S3C_FB_VSYNC_IRQ_EN, &sfb->irq_flags)) {
		/* IRQ enabled, disable it */
		irq_ctrl_reg = readl(regs + VIDINTCON0);

		irq_ctrl_reg &= ~VIDINTCON0_INT_FRAME;
		irq_ctrl_reg &= ~VIDINTCON0_INT_ENABLE;

		writel(irq_ctrl_reg, regs + VIDINTCON0);
	}
}

/* interrupt handler: acknowledges the VSYNC frame interrupt and wakes any
 * sleeper in s3c_fb_wait_for_vsync() */
static irqreturn_t s3c_fb_irq(int irq, void *dev_id)
{
	struct s3c_fb *sfb = dev_id;
	void __iomem *regs = sfb->regs;
	u32 irq_sts_reg;

	spin_lock(&sfb->slock);

	irq_sts_reg = readl(regs + VIDINTCON1);

	if (irq_sts_reg & VIDINTCON1_INT_FRAME) {
		/* VSYNC interrupt, accept it */
		writel(VIDINTCON1_INT_FRAME, regs + VIDINTCON1);

		sfb->vsync_info.count++;
		wake_up_interruptible(&sfb->vsync_info.wait);
	}

	/* We only support waiting for VSYNC for now, so it's safe
	 * to always disable irqs here.
	 */
	s3c_fb_disable_irq(sfb);

	spin_unlock(&sfb->slock);
	return IRQ_HANDLED;
}

/**
 * s3c_fb_wait_for_vsync() - sleep until next VSYNC interrupt or timeout
 * @sfb: main hardware state
 * @crtc: head index.
*/ static int s3c_fb_wait_for_vsync(struct s3c_fb *sfb, u32 crtc) { unsigned long count; int ret; if (crtc != 0) return -ENODEV; pm_runtime_get_sync(sfb->dev); count = sfb->vsync_info.count; s3c_fb_enable_irq(sfb); ret = wait_event_interruptible_timeout(sfb->vsync_info.wait, count != sfb->vsync_info.count, msecs_to_jiffies(VSYNC_TIMEOUT_MSEC)); pm_runtime_put_sync(sfb->dev); if (ret == 0) return -ETIMEDOUT; return 0; } static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct s3c_fb_win *win = info->par; struct s3c_fb *sfb = win->parent; int ret; u32 crtc; switch (cmd) { case FBIO_WAITFORVSYNC: if (get_user(crtc, (u32 __user *)arg)) { ret = -EFAULT; break; } ret = s3c_fb_wait_for_vsync(sfb, crtc); break; default: ret = -ENOTTY; } return ret; } static struct fb_ops s3c_fb_ops = { .owner = THIS_MODULE, .fb_check_var = s3c_fb_check_var, .fb_set_par = s3c_fb_set_par, .fb_blank = s3c_fb_blank, .fb_setcolreg = s3c_fb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = s3c_fb_pan_display, .fb_ioctl = s3c_fb_ioctl, }; /** * s3c_fb_missing_pixclock() - calculates pixel clock * @mode: The video mode to change. * * Calculate the pixel clock when none has been given through platform data. */ static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode) { u64 pixclk = 1000000000000ULL; u32 div; div = mode->left_margin + mode->hsync_len + mode->right_margin + mode->xres; div *= mode->upper_margin + mode->vsync_len + mode->lower_margin + mode->yres; div *= mode->refresh ? : 60; do_div(pixclk, div); mode->pixclock = pixclk; } /** * s3c_fb_alloc_memory() - allocate display memory for framebuffer window * @sfb: The base resources for the hardware. * @win: The window to initialise memory for. * * Allocate memory for the given framebuffer. 
*/ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win) { struct s3c_fb_pd_win *windata = win->windata; unsigned int real_size, virt_size, size; struct fb_info *fbi = win->fbinfo; dma_addr_t map_dma; dev_dbg(sfb->dev, "allocating memory for display\n"); real_size = windata->win_mode.xres * windata->win_mode.yres; virt_size = windata->virtual_x * windata->virtual_y; dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n", real_size, windata->win_mode.xres, windata->win_mode.yres, virt_size, windata->virtual_x, windata->virtual_y); size = (real_size > virt_size) ? real_size : virt_size; size *= (windata->max_bpp > 16) ? 32 : windata->max_bpp; size /= 8; fbi->fix.smem_len = size; size = PAGE_ALIGN(size); dev_dbg(sfb->dev, "want %u bytes for window\n", size); fbi->screen_base = dma_alloc_writecombine(sfb->dev, size, &map_dma, GFP_KERNEL); if (!fbi->screen_base) return -ENOMEM; dev_dbg(sfb->dev, "mapped %x to %p\n", (unsigned int)map_dma, fbi->screen_base); memset(fbi->screen_base, 0x0, size); fbi->fix.smem_start = map_dma; return 0; } /** * s3c_fb_free_memory() - free the display memory for the given window * @sfb: The base resources for the hardware. * @win: The window to free the display memory for. * * Free the display memory allocated by s3c_fb_alloc_memory(). */ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win) { struct fb_info *fbi = win->fbinfo; if (fbi->screen_base) dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len), fbi->screen_base, fbi->fix.smem_start); } /** * s3c_fb_release_win() - release resources for a framebuffer window. * @win: The window to cleanup the resources for. * * Release the resources that where claimed for the hardware window, * such as the framebuffer instance and any memory claimed for it. 
*/
static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
{
	u32 data;

	if (win->fbinfo) {
		/* disconnect the window's DMA channel before tearing down */
		if (sfb->variant.has_shadowcon) {
			data = readl(sfb->regs + SHADOWCON);
			data &= ~SHADOWCON_CHx_ENABLE(win->index);
			data &= ~SHADOWCON_CHx_LOCAL_ENABLE(win->index);
			writel(data, sfb->regs + SHADOWCON);
		}
		unregister_framebuffer(win->fbinfo);
		if (win->fbinfo->cmap.len)
			fb_dealloc_cmap(&win->fbinfo->cmap);
		s3c_fb_free_memory(sfb, win);
		framebuffer_release(win->fbinfo);
	}
}

/**
 * s3c_fb_probe_win() - register an hardware window
 * @sfb: The base resources for the hardware
 * @variant: The variant information for this window.
 * @res: Pointer to where to place the resultant window.
 *
 * Allocate and do the basic initialisation for one of the hardware's graphics
 * windows.
 */
static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
				      struct s3c_fb_win_variant *variant,
				      struct s3c_fb_win **res)
{
	struct fb_var_screeninfo *var;
	struct fb_videomode *initmode;
	struct s3c_fb_pd_win *windata;
	struct s3c_fb_win *win;
	struct fb_info *fbinfo;
	int palette_size;
	int ret;

	dev_dbg(sfb->dev, "probing window %d, variant %p\n", win_no, variant);

	init_waitqueue_head(&sfb->vsync_info.wait);

	palette_size = variant->palette_sz * 4;

	/* the window state and its palette buffer are allocated as the
	 * fb_info's private data */
	fbinfo = framebuffer_alloc(sizeof(struct s3c_fb_win) +
				   palette_size * sizeof(u32), sfb->dev);
	if (!fbinfo) {
		dev_err(sfb->dev, "failed to allocate framebuffer\n");
		return -ENOENT;
	}

	windata = sfb->pdata->win[win_no];
	initmode = &windata->win_mode;

	WARN_ON(windata->max_bpp == 0);
	WARN_ON(windata->win_mode.xres == 0);
	WARN_ON(windata->win_mode.yres == 0);

	win = fbinfo->par;
	*res = win;
	var = &fbinfo->var;
	win->variant = *variant;
	win->fbinfo = fbinfo;
	win->parent = sfb;
	win->windata = windata;
	win->index = win_no;
	win->palette_buffer = (u32 *)(win + 1);

	ret = s3c_fb_alloc_memory(sfb, win);
	if (ret) {
		dev_err(sfb->dev, "failed to allocate display memory\n");
		return ret;
	}

	/* setup the r/b/g positions for the window's palette */
	if (win->variant.palette_16bpp) {
		/* Set RGB 5:6:5 as default */
		win->palette.r.offset = 11;
		win->palette.r.length = 5;
		win->palette.g.offset = 5;
		win->palette.g.length = 6;
		win->palette.b.offset = 0;
		win->palette.b.length = 5;
	} else {
		/* Set 8bpp or 8bpp and 1bit alpha */
		win->palette.r.offset = 16;
		win->palette.r.length = 8;
		win->palette.g.offset = 8;
		win->palette.g.length = 8;
		win->palette.b.offset = 0;
		win->palette.b.length = 8;
	}

	/* setup the initial video mode from the window */
	fb_videomode_to_var(&fbinfo->var, initmode);

	fbinfo->fix.type	= FB_TYPE_PACKED_PIXELS;
	fbinfo->fix.accel	= FB_ACCEL_NONE;
	fbinfo->var.activate	= FB_ACTIVATE_NOW;
	fbinfo->var.vmode	= FB_VMODE_NONINTERLACED;
	fbinfo->var.bits_per_pixel = windata->default_bpp;
	fbinfo->fbops		= &s3c_fb_ops;
	fbinfo->flags		= FBINFO_FLAG_DEFAULT;
	fbinfo->pseudo_palette	= &win->pseudo_palette;

	/* prepare to actually start the framebuffer */
	ret = s3c_fb_check_var(&fbinfo->var, fbinfo);
	if (ret < 0) {
		dev_err(sfb->dev, "check_var failed on initial video params\n");
		return ret;
	}

	/* create initial colour map */
	ret = fb_alloc_cmap(&fbinfo->cmap, win->variant.palette_sz, 1);
	if (ret == 0)
		fb_set_cmap(&fbinfo->cmap, fbinfo);
	else
		dev_err(sfb->dev, "failed to allocate fb cmap\n");

	s3c_fb_set_par(fbinfo);

	dev_dbg(sfb->dev, "about to register framebuffer\n");

	/* run the check_var and set_par on our configuration. */

	ret = register_framebuffer(fbinfo);
	if (ret < 0) {
		dev_err(sfb->dev, "failed to register framebuffer\n");
		return ret;
	}

	dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);

	return 0;
}

/**
 * s3c_fb_clear_win() - clear hardware window registers.
 * @sfb: The base resources for the hardware.
 * @win: The window to process.
 *
 * Reset the specific window registers to a known state.
*/
static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
{
	void __iomem *regs = sfb->regs;
	u32 reg;

	writel(0, regs + sfb->variant.wincon + (win * 4));
	writel(0, regs + VIDOSD_A(win, sfb->variant));
	writel(0, regs + VIDOSD_B(win, sfb->variant));
	writel(0, regs + VIDOSD_C(win, sfb->variant));
	/* drop the shadow-register protection for this window */
	reg = readl(regs + SHADOWCON);
	writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
}

/**
 * s3c_fb_probe() - bind the driver to a platform device.
 * @pdev: The platform device to attach to.
 *
 * Claim clocks, registers and the IRQ, reset the hardware windows and then
 * create a framebuffer for each window described by the platform data.
 * Returns 0 on success, or a negative errno on failure.
 */
static int __devinit s3c_fb_probe(struct platform_device *pdev)
{
	const struct platform_device_id *platid;
	struct s3c_fb_driverdata *fbdrv;
	struct device *dev = &pdev->dev;
	struct s3c_fb_platdata *pd;
	struct s3c_fb *sfb;
	struct resource *res;
	int win;
	int ret = 0;
	u32 reg;

	platid = platform_get_device_id(pdev);
	fbdrv = (struct s3c_fb_driverdata *)platid->driver_data;

	if (fbdrv->variant.nr_windows > S3C_FB_MAX_WIN) {
		dev_err(dev, "too many windows, cannot attach\n");
		return -EINVAL;
	}

	pd = pdev->dev.platform_data;
	if (!pd) {
		dev_err(dev, "no platform data specified\n");
		return -EINVAL;
	}

	sfb = devm_kzalloc(dev, sizeof(struct s3c_fb), GFP_KERNEL);
	if (!sfb) {
		dev_err(dev, "no memory for framebuffers\n");
		return -ENOMEM;
	}

	dev_dbg(dev, "allocate new framebuffer %p\n", sfb);

	sfb->dev = dev;
	sfb->pdata = pd;
	sfb->variant = fbdrv->variant;

	spin_lock_init(&sfb->slock);

	sfb->bus_clk = clk_get(dev, "lcd");
	if (IS_ERR(sfb->bus_clk)) {
		dev_err(dev, "failed to get bus clock\n");
		ret = PTR_ERR(sfb->bus_clk);
		goto err_sfb;
	}

	clk_enable(sfb->bus_clk);

	/* without an internal clock select, a separate pixel clock is used */
	if (!sfb->variant.has_clksel) {
		sfb->lcd_clk = clk_get(dev, "sclk_fimd");
		if (IS_ERR(sfb->lcd_clk)) {
			dev_err(dev, "failed to get lcd clock\n");
			ret = PTR_ERR(sfb->lcd_clk);
			goto err_bus_clk;
		}

		clk_enable(sfb->lcd_clk);
	}

	pm_runtime_enable(sfb->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to find registers\n");
		ret = -ENOENT;
		goto err_lcd_clk;
	}

	sfb->regs = devm_request_and_ioremap(dev, res);
	if (!sfb->regs) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENXIO;
		goto err_lcd_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to acquire irq resource\n");
		ret = -ENOENT;
		goto err_lcd_clk;
	}
	sfb->irq_no = res->start;
	ret = devm_request_irq(dev, sfb->irq_no, s3c_fb_irq,
			  0, "s3c_fb", sfb);
	if (ret) {
		dev_err(dev, "irq request failed\n");
		goto err_lcd_clk;
	}

	dev_dbg(dev, "got resources (regs %p), probing windows\n", sfb->regs);

	platform_set_drvdata(pdev, sfb);
	pm_runtime_get_sync(sfb->dev);

	/* setup gpio and output polarity controls */

	pd->setup_gpio();

	writel(pd->vidcon1, sfb->regs + VIDCON1);

	/* set video clock running at under-run */
	if (sfb->variant.has_fixvclk) {
		reg = readl(sfb->regs + VIDCON1);
		reg &= ~VIDCON1_VCLK_MASK;
		reg |= VIDCON1_VCLK_RUN;
		writel(reg, sfb->regs + VIDCON1);
	}

	/* zero all windows before we do anything */

	for (win = 0; win < fbdrv->variant.nr_windows; win++)
		s3c_fb_clear_win(sfb, win);

	/* initialise colour key controls */
	for (win = 0; win < (fbdrv->variant.nr_windows - 1); win++) {
		void __iomem *regs = sfb->regs + sfb->variant.keycon;

		regs += (win * 8);
		writel(0xffffff, regs + WKEYCON0);
		writel(0xffffff, regs + WKEYCON1);
	}

	/* we have the register setup, start allocating framebuffers */

	for (win = 0; win < fbdrv->variant.nr_windows; win++) {
		if (!pd->win[win])
			continue;

		if (!pd->win[win]->win_mode.pixclock)
			s3c_fb_missing_pixclock(&pd->win[win]->win_mode);

		ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
				       &sfb->windows[win]);
		if (ret < 0) {
			dev_err(dev, "failed to create window %d\n", win);
			/* Unwind only the windows that were actually
			 * created: entries skipped above (no platform
			 * data) are still NULL and must not be passed
			 * to s3c_fb_release_win(), which dereferences
			 * its argument unconditionally. */
			for (; win >= 0; win--)
				if (sfb->windows[win])
					s3c_fb_release_win(sfb,
							   sfb->windows[win]);
			goto err_pm_runtime;
		}
	}

	platform_set_drvdata(pdev, sfb);
	pm_runtime_put_sync(sfb->dev);

	return 0;

err_pm_runtime:
	pm_runtime_put_sync(sfb->dev);

err_lcd_clk:
	pm_runtime_disable(sfb->dev);

	if (!sfb->variant.has_clksel) {
		clk_disable(sfb->lcd_clk);
		clk_put(sfb->lcd_clk);
	}

err_bus_clk:
	clk_disable(sfb->bus_clk);
	clk_put(sfb->bus_clk);

err_sfb:
	return ret;
}

/**
 * s3c_fb_remove() - Cleanup on module finalisation
 * @pdev: The platform
device we are bound to.
 *
 * Shutdown and then release all the resources that the driver allocated
 * on initialisation.
 */
static int __devexit s3c_fb_remove(struct platform_device *pdev)
{
	struct s3c_fb *sfb = platform_get_drvdata(pdev);
	int win;

	pm_runtime_get_sync(sfb->dev);

	for (win = 0; win < S3C_FB_MAX_WIN; win++)
		if (sfb->windows[win])
			s3c_fb_release_win(sfb, sfb->windows[win]);

	if (!sfb->variant.has_clksel) {
		clk_disable(sfb->lcd_clk);
		clk_put(sfb->lcd_clk);
	}

	clk_disable(sfb->bus_clk);
	clk_put(sfb->bus_clk);

	pm_runtime_put_sync(sfb->dev);
	pm_runtime_disable(sfb->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* system sleep: blank every window, then gate the clocks */
static int s3c_fb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c_fb *sfb = platform_get_drvdata(pdev);
	struct s3c_fb_win *win;
	int win_no;

	for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
		win = sfb->windows[win_no];
		if (!win)
			continue;

		/* use the blank function to push into power-down */
		s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
	}

	if (!sfb->variant.has_clksel)
		clk_disable(sfb->lcd_clk);

	clk_disable(sfb->bus_clk);
	return 0;
}

/* system resume: re-enable clocks and re-run the hardware setup done at
 * probe time, then restore each window's configuration */
static int s3c_fb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c_fb *sfb = platform_get_drvdata(pdev);
	struct s3c_fb_platdata *pd = sfb->pdata;
	struct s3c_fb_win *win;
	int win_no;
	u32 reg;

	clk_enable(sfb->bus_clk);

	if (!sfb->variant.has_clksel)
		clk_enable(sfb->lcd_clk);

	/* setup gpio and output polarity controls */
	pd->setup_gpio();
	writel(pd->vidcon1, sfb->regs + VIDCON1);

	/* set video clock running at under-run */
	if (sfb->variant.has_fixvclk) {
		reg = readl(sfb->regs + VIDCON1);
		reg &= ~VIDCON1_VCLK_MASK;
		reg |= VIDCON1_VCLK_RUN;
		writel(reg, sfb->regs + VIDCON1);
	}

	/* zero all windows before we do anything */
	for (win_no = 0; win_no < sfb->variant.nr_windows; win_no++)
		s3c_fb_clear_win(sfb, win_no);

	/* re-initialise the colour key controls */
	for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
		void __iomem *regs = sfb->regs + sfb->variant.keycon;
		win = sfb->windows[win_no];
		if (!win)
			continue;

		shadow_protect_win(win, 1);
		regs += (win_no * 8);
		writel(0xffffff, regs + WKEYCON0);
		writel(0xffffff, regs + WKEYCON1);
		shadow_protect_win(win, 0);
	}

	/* restore framebuffers */
	for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
		win = sfb->windows[win_no];
		if (!win)
			continue;

		dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
		s3c_fb_set_par(win->fbinfo);
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
/* runtime suspend: just gate the clocks; register state is retained by
 * the driver and re-applied on the next full configuration */
static int s3c_fb_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c_fb *sfb = platform_get_drvdata(pdev);

	if (!sfb->variant.has_clksel)
		clk_disable(sfb->lcd_clk);

	clk_disable(sfb->bus_clk);
	return 0;
}

static int s3c_fb_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s3c_fb *sfb = platform_get_drvdata(pdev);
	struct s3c_fb_platdata *pd = sfb->pdata;

	clk_enable(sfb->bus_clk);

	if (!sfb->variant.has_clksel)
		clk_enable(sfb->lcd_clk);

	/* setup gpio and output polarity controls */
	pd->setup_gpio();
	writel(pd->vidcon1, sfb->regs + VIDCON1);

	return 0;
}
#endif

#define VALID_BPP124 (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4))
#define VALID_BPP1248 (VALID_BPP124 | VALID_BPP(8))

/* per-window capabilities for the S3C64xx series of SoCs */
static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = {
	[0] = {
		.has_osd_c	= 1,
		.osd_size_off	= 0x8,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(24)),
	},
	[1] = {
		.has_osd_c	= 1,
		.has_osd_d	= 1,
		.osd_size_off	= 0xc,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(28)),
	},
	[2] = {
		.has_osd_c	= 1,
		.has_osd_d	= 1,
		.osd_size_off	= 0xc,
		.has_osd_alpha	= 1,
		.palette_sz	= 16,
		.palette_16bpp	= 1,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(28)),
	},
	[3] = {
		.has_osd_c	= 1,
		.has_osd_alpha	= 1,
		.palette_sz	= 16,
		.palette_16bpp	= 1,
		.valid_bpp	= (VALID_BPP124 | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(28)),
	},
	[4] = {
		.has_osd_c	= 1,
		.has_osd_alpha	= 1,
		.palette_sz	= 4,
		.palette_16bpp	= 1,
		.valid_bpp	= (VALID_BPP(1) | VALID_BPP(2) |
				   VALID_BPP(16) | VALID_BPP(18) |
				   VALID_BPP(19) | VALID_BPP(24) |
				   VALID_BPP(25) | VALID_BPP(28)),
	},
};

/* per-window capabilities shared by the S5P/Exynos style SoCs */
static struct s3c_fb_win_variant s3c_fb_data_s5p_wins[] = {
	[0] = {
		.has_osd_c	= 1,
		.osd_size_off	= 0x8,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(13) |
				   VALID_BPP(15) | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(32)),
	},
	[1] = {
		.has_osd_c	= 1,
		.has_osd_d	= 1,
		.osd_size_off	= 0xc,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(13) |
				   VALID_BPP(15) | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(32)),
	},
	[2] = {
		.has_osd_c	= 1,
		.has_osd_d	= 1,
		.osd_size_off	= 0xc,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(13) |
				   VALID_BPP(15) | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(32)),
	},
	[3] = {
		.has_osd_c	= 1,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(13) |
				   VALID_BPP(15) | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(32)),
	},
	[4] = {
		.has_osd_c	= 1,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(13) |
				   VALID_BPP(15) | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(32)),
	},
};

/* register layout and features for each supported SoC family */
static struct s3c_fb_driverdata s3c_fb_data_64xx = {
	.variant = {
		.nr_windows	= 5,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x400,
			[1] = 0x800,
			[2] = 0x300,
			[3] = 0x320,
			[4] = 0x340,
		},

		.has_prtcon	= 1,
		.has_clksel	= 1,
	},
	.win[0]	= &s3c_fb_data_64xx_wins[0],
	.win[1]	= &s3c_fb_data_64xx_wins[1],
	.win[2]	= &s3c_fb_data_64xx_wins[2],
	.win[3]	= &s3c_fb_data_64xx_wins[3],
	.win[4]	= &s3c_fb_data_64xx_wins[4],
};

static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
	.variant = {
		.nr_windows	= 5,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x2400,
			[1] = 0x2800,
			[2] = 0x2c00,
			[3] = 0x3000,
			[4] = 0x3400,
		},

		.has_prtcon	= 1,
		.has_blendcon	= 1,
		.has_clksel	= 1,
	},
	.win[0]	= &s3c_fb_data_s5p_wins[0],
	.win[1]	= &s3c_fb_data_s5p_wins[1],
	.win[2]	= &s3c_fb_data_s5p_wins[2],
	.win[3]	= &s3c_fb_data_s5p_wins[3],
	.win[4]	= &s3c_fb_data_s5p_wins[4],
};

static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
	.variant = {
		.nr_windows	= 5,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x2400,
			[1] = 0x2800,
			[2] = 0x2c00,
			[3] = 0x3000,
			[4] = 0x3400,
		},

		.has_shadowcon	= 1,
		.has_blendcon	= 1,
		.has_clksel	= 1,
		.has_fixvclk	= 1,
	},
	.win[0]	= &s3c_fb_data_s5p_wins[0],
	.win[1]	= &s3c_fb_data_s5p_wins[1],
	.win[2]	= &s3c_fb_data_s5p_wins[2],
	.win[3]	= &s3c_fb_data_s5p_wins[3],
	.win[4]	= &s3c_fb_data_s5p_wins[4],
};

static struct s3c_fb_driverdata s3c_fb_data_exynos4 = {
	.variant = {
		.nr_windows	= 5,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x2400,
			[1] = 0x2800,
			[2] = 0x2c00,
			[3] = 0x3000,
			[4] = 0x3400,
		},

		.has_shadowcon	= 1,
		.has_blendcon	= 1,
		.has_fixvclk	= 1,
	},
	.win[0]	= &s3c_fb_data_s5p_wins[0],
	.win[1]	= &s3c_fb_data_s5p_wins[1],
	.win[2]	= &s3c_fb_data_s5p_wins[2],
	.win[3]	= &s3c_fb_data_s5p_wins[3],
	.win[4]	= &s3c_fb_data_s5p_wins[4],
};

static struct s3c_fb_driverdata s3c_fb_data_exynos5 = {
	.variant = {
		.nr_windows	= 5,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x2400,
			[1] = 0x2800,
			[2] = 0x2c00,
			[3] = 0x3000,
			[4] = 0x3400,
		},

		.has_shadowcon	= 1,
		.has_blendcon	= 1,
		.has_fixvclk	= 1,
	},
	.win[0]	= &s3c_fb_data_s5p_wins[0],
	.win[1]	= &s3c_fb_data_s5p_wins[1],
	.win[2]	= &s3c_fb_data_s5p_wins[2],
	.win[3]	= &s3c_fb_data_s5p_wins[3],
	.win[4]	= &s3c_fb_data_s5p_wins[4],
};

/* S3C2443/S3C2416 style hardware */
static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
	.variant = {
		.nr_windows	= 2,
		.is_2443	= 1,

		.vidtcon	= 0x08,
		.wincon		= 0x14,
		.winmap		= 0xd0,
		.keycon		= 0xb0,
		.osd		= 0x28,
		.osd_stride	= 12,
		.buf_start	= 0x64,
		.buf_size	= 0x94,
		.buf_end	= 0x7c,

		.palette = {
			[0] = 0x400,
			[1] = 0x800,
		},
		.has_clksel	= 1,
	},
	.win[0] = &(struct s3c_fb_win_variant) {
		.palette_sz	= 256,
		.valid_bpp	= VALID_BPP1248 | VALID_BPP(16) |
				  VALID_BPP(24),
	},
	.win[1] = &(struct s3c_fb_win_variant) {
		.has_osd_c	= 1,
		.has_osd_alpha	= 1,
		.palette_sz	= 256,
		.valid_bpp	= (VALID_BPP1248 | VALID_BPP(16) |
				   VALID_BPP(18) | VALID_BPP(19) |
				   VALID_BPP(24) | VALID_BPP(25) |
				   VALID_BPP(28)),
	},
};

static struct s3c_fb_driverdata s3c_fb_data_s5p64x0 = {
	.variant = {
		.nr_windows	= 3,
		.vidtcon	= VIDTCON0,
		.wincon		= WINCON(0),
		.winmap		= WINxMAP(0),
		.keycon		= WKEYCON,
		.osd		= VIDOSD_BASE,
		.osd_stride	= 16,
		.buf_start	= VIDW_BUF_START(0),
		.buf_size	= VIDW_BUF_SIZE(0),
		.buf_end	= VIDW_BUF_END(0),

		.palette = {
			[0] = 0x2400,
			[1] = 0x2800,
			[2] = 0x2c00,
		},

		.has_blendcon	= 1,
		.has_fixvclk	= 1,
	},
	.win[0] = &s3c_fb_data_s5p_wins[0],
	.win[1] = &s3c_fb_data_s5p_wins[1],
	.win[2] = &s3c_fb_data_s5p_wins[2],
};

/* match table: maps each platform device name to its driver data */
static struct platform_device_id s3c_fb_driver_ids[] = {
	{
		.name		= "s3c-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_64xx,
	}, {
		.name		= "s5pc100-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_s5pc100,
	}, {
		.name		= "s5pv210-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_s5pv210,
	}, {
		.name		= "exynos4-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_exynos4,
	}, {
		.name		= "exynos5-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_exynos5,
	}, {
		.name		= "s3c2443-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_s3c2443,
	}, {
		.name		= "s5p64x0-fb",
		.driver_data	= (unsigned long)&s3c_fb_data_s5p64x0,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, s3c_fb_driver_ids);

static const struct dev_pm_ops s3cfb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c_fb_suspend, s3c_fb_resume)
	SET_RUNTIME_PM_OPS(s3c_fb_runtime_suspend, s3c_fb_runtime_resume,
			   NULL)
};

static struct platform_driver s3c_fb_driver = {
	.probe		= s3c_fb_probe,
	.remove		= __devexit_p(s3c_fb_remove),
	.id_table	= s3c_fb_driver_ids,
	.driver		= {
		.name	= "s3c-fb",
		.owner	= THIS_MODULE,
		.pm	= &s3cfb_pm_ops,
	},
};

module_platform_driver(s3c_fb_driver);

MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c-fb");
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissewifi
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
4899
45619
/******************************************************************************
 * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 ******************************************************************************/
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h"
#include "r8192E_phy.h"
#include "rtl_dm.h"
#include "r8192E_hwimg.h"

/*
 * Per-channel RF synthesizer words for the Zebra radio; slot 0 is a
 * placeholder so the table can be indexed directly by channel 1..14.
 */
static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
	0,
	0x085c,
	0x08dc,
	0x095c,
	0x09dc,
	0x0a5c,
	0x0adc,
	0x0b5c,
	0x0bdc,
	0x0c5c,
	0x0cdc,
	0x0d5c,
	0x0ddc,
	0x0e5c,
	0x0f72,
};

/*************************Define local function prototype**********************/

static u32 phy_FwRFSerialRead(struct net_device *dev,
			      enum rf90_radio_path eRFPath, u32 Offset);
static void phy_FwRFSerialWrite(struct net_device *dev,
				enum rf90_radio_path eRFPath, u32 Offset,
				u32 Data);

/* Return the index of the least-significant set bit of the mask
 * (32 if the mask is zero).
 */
static u32 rtl8192_CalculateBitShift(u32 dwBitMask)
{
	u32 bit;

	for (bit = 0; bit <= 31; bit++) {
		if ((dwBitMask >> bit) & 0x1)
			break;
	}
	return bit;
}

/* Validate an RF path index against the fitted RF configuration.
 * NOTE(review): for RF_2T4R this reports every path as not usable (0),
 * which looks surprising but matches the original behavior — confirm
 * against the vendor code before changing.
 */
u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 legal = 1;

	if (priv->rf_type == RF_2T4R) {
		legal = 0;
	} else if (priv->rf_type == RF_1T2R) {
		if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B)
			legal = 1;
		else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D)
			legal = 0;
	}
	return legal;
}

/* Read-modify-write the masked field of a baseband register; a full-word
 * mask skips the read-back.
 */
void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
		      u32 dwData)
{
	u32 reg, shift;

	if (dwBitMask != bMaskDWord) {
		reg = read_nic_dword(dev, dwRegAddr);
		shift = rtl8192_CalculateBitShift(dwBitMask);
		reg = (reg & ~dwBitMask) | (dwData << shift);
		write_nic_dword(dev, dwRegAddr, reg);
	} else {
		write_nic_dword(dev, dwRegAddr, dwData);
	}
}

/* Read a baseband register and return the masked field, right-aligned. */
u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
{
	u32 reg, shift;

	reg = read_nic_dword(dev, dwRegAddr);
	shift = rtl8192_CalculateBitShift(dwBitMask);
	return (reg & dwBitMask) >> shift;
}

/* Read one RF register over the 3-wire serial interface.
 * Offsets above 15 are reached by temporarily flipping paging bits in the
 * cached RF register 0 image (RF8256 only).
 */
static u32 rtl8192_phy_RFSerialRead(struct net_device *dev,
				    enum rf90_radio_path eRFPath, u32 Offset)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
	u32 value = 0;
	u32 NewOffset = 0;

	Offset &= 0x3f;
	if (priv->rf_chip == RF_8256) {
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
		if (Offset >= 31) {
			priv->RfReg0Value[eRFPath] |= 0x140;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
					 bMaskDWord,
					 priv->RfReg0Value[eRFPath] << 16);
			NewOffset = Offset - 30;
		} else if (Offset >= 16) {
			priv->RfReg0Value[eRFPath] |= 0x100;
			priv->RfReg0Value[eRFPath] &= ~0x40;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
					 bMaskDWord,
					 priv->RfReg0Value[eRFPath] << 16);
			NewOffset = Offset - 15;
		} else {
			NewOffset = Offset;
		}
	} else {
		RT_TRACE((COMP_PHY | COMP_ERR),
			 "check RF type here, need to be 8256\n");
		NewOffset = Offset;
	}
	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
			 NewOffset);
	/* Toggle the read edge to latch the address, then let HW settle. */
	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
	mdelay(1);
	value = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
				   bLSSIReadBackData);
	if (priv->rf_chip == RF_8256) {
		/* Restore the paging bits. */
		priv->RfReg0Value[eRFPath] &= 0xebf;
		rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
				 priv->RfReg0Value[eRFPath] << 16);
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
	}
	return value;
}

/* Write one RF register over the 3-wire serial interface; same paging
 * scheme as rtl8192_phy_RFSerialRead(). A write to offset 0 refreshes the
 * cached register-0 image.
 */
static void rtl8192_phy_RFSerialWrite(struct net_device *dev,
				      enum rf90_radio_path eRFPath, u32 Offset,
				      u32 Data)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
	u32 DataAndAddr = 0;
	u32 NewOffset = 0;

	Offset &= 0x3f;
	if (priv->rf_chip == RF_8256) {
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
		if (Offset >= 31) {
			priv->RfReg0Value[eRFPath] |= 0x140;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
					 bMaskDWord,
					 priv->RfReg0Value[eRFPath] << 16);
			NewOffset = Offset - 30;
		} else if (Offset >= 16) {
			priv->RfReg0Value[eRFPath] |= 0x100;
			priv->RfReg0Value[eRFPath] &= ~0x40;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
					 bMaskDWord,
					 priv->RfReg0Value[eRFPath] << 16);
			NewOffset = Offset - 15;
		} else {
			NewOffset = Offset;
		}
	} else {
		RT_TRACE((COMP_PHY | COMP_ERR),
			 "check RF type here, need to be 8256\n");
		NewOffset = Offset;
	}
	DataAndAddr = (Data << 16) | (NewOffset & 0x3f);
	rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
	if (Offset == 0x0)
		priv->RfReg0Value[eRFPath] = Data;
	if (priv->rf_chip == RF_8256) {
		if (Offset != 0) {
			priv->RfReg0Value[eRFPath] &= 0xebf;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
					 bMaskDWord,
					 priv->RfReg0Value[eRFPath] << 16);
		}
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
	}
}

/* Read-modify-write a masked RF register field, either through the
 * firmware mailbox (RF_OP_By_FW) or directly over the serial bus.
 */
void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
			  u32 RegAddr, u32 BitMask, u32 Data)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 reg, shift;

	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
		return;
	if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
		return;

	RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
	if (priv->Rf_Mode == RF_OP_By_FW) {
		if (BitMask != bMask12Bits) {
			reg = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
			shift = rtl8192_CalculateBitShift(BitMask);
			reg = (reg & ~BitMask) | (Data << shift);
			phy_FwRFSerialWrite(dev, eRFPath, RegAddr, reg);
		} else {
			phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data);
		}
		udelay(200);
	} else {
		if (BitMask != bMask12Bits) {
			reg = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
			shift = rtl8192_CalculateBitShift(BitMask);
			reg = (reg & ~BitMask) | (Data << shift);
			rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, reg);
		} else {
			rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data);
		}
	}
}

/* Read a masked RF register field; serialized against other RF accesses
 * with priv->rf_sem.
 */
u32 rtl8192_phy_QueryRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
			   u32 RegAddr, u32 BitMask)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 reg, shift, value;

	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
		return 0;
	if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
		return 0;

	down(&priv->rf_sem);
	if (priv->Rf_Mode == RF_OP_By_FW) {
		reg = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
		udelay(200);
	} else {
		reg = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
	}
	shift = rtl8192_CalculateBitShift(BitMask);
	value = (reg & BitMask) >> shift;
	up(&priv->rf_sem);
	return value;
}

/* Ask the firmware to read an RF register: post a request via QPNR and
 * poll for completion (bit 31 clears). Returns 0 on timeout.
 */
static u32 phy_FwRFSerialRead(struct net_device *dev,
			      enum rf90_radio_path eRFPath, u32 Offset)
{
	u32 Data = 0;
	u8 time = 0;

	Data |= (Offset & 0xFF) << 12;
	Data |= (eRFPath & 0x3) << 20;
	Data |= 0x80000000;
	/* Wait for any in-flight firmware request to drain first. */
	while (read_nic_dword(dev, QPNR) & 0x80000000) {
		if (time++ < 100)
			udelay(10);
		else
			break;
	}
	write_nic_dword(dev, QPNR, Data);
	while (read_nic_dword(dev, QPNR) & 0x80000000) {
		if (time++ < 100)
			udelay(10);
		else
			return 0;
	}
	return read_nic_dword(dev, RF_DATA);
}	/* phy_FwRFSerialRead */

/* Ask the firmware to write an RF register through the QPNR mailbox. */
static void phy_FwRFSerialWrite(struct net_device *dev,
				enum rf90_radio_path eRFPath, u32 Offset,
				u32 Data)
{
	u8 time = 0;

	Data |= (Offset & 0xFF) << 12;
	Data |= (eRFPath & 0x3) << 20;
	Data |= 0x400000;
	Data |= 0x80000000;
while (read_nic_dword(dev, QPNR) & 0x80000000) { if (time++ < 100) udelay(10); else break; } write_nic_dword(dev, QPNR, Data); } /* phy_FwRFSerialWrite */ void rtl8192_phy_configmac(struct net_device *dev) { u32 dwArrayLen = 0, i = 0; u32 *pdwArray = NULL; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bTXPowerDataReadFromEEPORM) { RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n"); dwArrayLen = MACPHY_Array_PGLength; pdwArray = Rtl819XMACPHY_Array_PG; } else { RT_TRACE(COMP_PHY, "Read rtl819XMACPHY_Array\n"); dwArrayLen = MACPHY_ArrayLength; pdwArray = Rtl819XMACPHY_Array; } for (i = 0; i < dwArrayLen; i += 3) { RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MAC" "PHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n", pdwArray[i], pdwArray[i+1], pdwArray[i+2]); if (pdwArray[i] == 0x318) pdwArray[i+2] = 0x00000800; rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1], pdwArray[i+2]); } return; } void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType) { int i; u32 *Rtl819XPHY_REGArray_Table = NULL; u32 *Rtl819XAGCTAB_Array_Table = NULL; u16 AGCTAB_ArrayLen, PHY_REGArrayLen = 0; struct r8192_priv *priv = rtllib_priv(dev); AGCTAB_ArrayLen = AGCTAB_ArrayLength; Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array; if (priv->rf_type == RF_2T4R) { PHY_REGArrayLen = PHY_REGArrayLength; Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArray; } else if (priv->rf_type == RF_1T2R) { PHY_REGArrayLen = PHY_REG_1T2RArrayLength; Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray; } if (ConfigType == BaseBand_Config_PHY_REG) { for (i = 0; i < PHY_REGArrayLen; i += 2) { rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i], bMaskDWord, Rtl819XPHY_REGArray_Table[i+1]); RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray" "[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n", i, Rtl819XPHY_REGArray_Table[i], Rtl819XPHY_REGArray_Table[i+1]); } } else if (ConfigType == BaseBand_Config_AGC_TAB) { for (i = 0; i < AGCTAB_ArrayLen; i += 2) { rtl8192_setBBreg(dev, 
Rtl819XAGCTAB_Array_Table[i], bMaskDWord, Rtl819XAGCTAB_Array_Table[i+1]); RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] " "is %x rtl819XAGCTAB_Array[1] is %x\n", i, Rtl819XAGCTAB_Array_Table[i], Rtl819XAGCTAB_Array_Table[i+1]); } } return; } static void rtl8192_InitBBRFRegDef(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter; priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter; priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter; priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter; priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter; priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter; priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = 
rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl; priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl; priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl; priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1; priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1; priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1; priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1; priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2; priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2; priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2; priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2; priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance; priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE; priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE; 
priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE; priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE; priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance; priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE; priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE; priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE; priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE; priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack; priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack; priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack; priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack; } bool rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block CheckBlock, enum rf90_radio_path eRFPath) { bool ret = true; u32 i, CheckTimes = 4, dwRegRead = 0; u32 WriteAddr[4]; u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f}; WriteAddr[HW90_BLOCK_MAC] = 0x100; WriteAddr[HW90_BLOCK_PHY0] = 0x900; WriteAddr[HW90_BLOCK_PHY1] = 0x800; WriteAddr[HW90_BLOCK_RF] = 0x3; RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __func__, CheckBlock); for (i = 0; i < CheckTimes; i++) { switch (CheckBlock) { case HW90_BLOCK_MAC: RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write " "0x100 here!"); break; case HW90_BLOCK_PHY0: case HW90_BLOCK_PHY1: write_nic_dword(dev, WriteAddr[CheckBlock], WriteData[i]); dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]); break; case HW90_BLOCK_RF: WriteData[i] &= 0xfff; rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]); mdelay(10); dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMaskDWord); mdelay(10); break; default: ret = false; break; } if (dwRegRead != WriteData[i]) { 
RT_TRACE(COMP_ERR, "====>error=====dwRegRead: %x, " "WriteData: %x\n", dwRegRead, WriteData[i]); ret = false; break; } } return ret; } static bool rtl8192_BB_Config_ParaFile(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); bool rtStatus = true; u8 bRegValue = 0, eCheckItem = 0; u32 dwRegValue = 0; bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET); write_nic_byte(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT)); dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST))); for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0; eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) { rtStatus = rtl8192_phy_checkBBAndRF(dev, (enum hw90_block)eCheckItem, (enum rf90_radio_path)0); if (rtStatus != true) { RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():" "Check PHY%d Fail!!\n", eCheckItem-1); return rtStatus; } } rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0); rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG); dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST)); rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB); if (priv->IC_Cut > VERSION_8190_BD) { if (priv->rf_type == RF_2T4R) dwRegValue = (priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]); else dwRegValue = 0x0; rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue); dwRegValue = priv->CrystalCap; rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x, dwRegValue); } return rtStatus; } bool rtl8192_BBConfig(struct net_device *dev) { bool rtStatus = true; rtl8192_InitBBRFRegDef(dev); rtStatus = rtl8192_BB_Config_ParaFile(dev); return rtStatus; } void rtl8192_phy_getTxPower(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->MCSTxPowerLevelOriginalOffset[0] = read_nic_dword(dev, rTxAGC_Rate18_06); priv->MCSTxPowerLevelOriginalOffset[1] = read_nic_dword(dev, rTxAGC_Rate54_24); 
priv->MCSTxPowerLevelOriginalOffset[2] = read_nic_dword(dev, rTxAGC_Mcs03_Mcs00); priv->MCSTxPowerLevelOriginalOffset[3] = read_nic_dword(dev, rTxAGC_Mcs07_Mcs04); priv->MCSTxPowerLevelOriginalOffset[4] = read_nic_dword(dev, rTxAGC_Mcs11_Mcs08); priv->MCSTxPowerLevelOriginalOffset[5] = read_nic_dword(dev, rTxAGC_Mcs15_Mcs12); priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1); priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1); priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1); priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1); RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, " "c60=0x%x, c68=0x%x)\n", priv->DefaultInitialGain[0], priv->DefaultInitialGain[1], priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]); priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3); priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2); RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n", rOFDM0_RxDetector3, priv->framesync); priv->SifsTime = read_nic_word(dev, SIFS); return; } void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u8 powerlevel = 0, powerlevelOFDM24G = 0; char ant_pwr_diff; u32 u4RegValue; if (priv->epromtype == EEPROM_93C46) { powerlevel = priv->TxPowerLevelCCK[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; } else if (priv->epromtype == EEPROM_93C56) { if (priv->rf_type == RF_1T2R) { powerlevel = priv->TxPowerLevelCCK_C[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1]; } else if (priv->rf_type == RF_2T4R) { powerlevel = priv->TxPowerLevelCCK_A[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1]; ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1] - priv->TxPowerLevelOFDM24G_A[channel-1]; priv->RF_C_TxPwDiff = ant_pwr_diff; ant_pwr_diff &= 0xf; priv->AntennaTxPwDiff[2] = 0; priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff); priv->AntennaTxPwDiff[0] 
= 0; u4RegValue = (priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]); rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue); } } switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip in funtion %s()\n", __func__); break; } return; } bool rtl8192_phy_RFConfig(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); bool rtStatus = true; switch (priv->rf_chip) { case RF_8225: break; case RF_8256: rtStatus = PHY_RF8256_Config(dev); break; case RF_8258: break; case RF_PSEUDO_11N: break; default: RT_TRACE(COMP_ERR, "error chip id\n"); break; } return rtStatus; } void rtl8192_phy_updateInitGain(struct net_device *dev) { return; } u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev, enum rf90_radio_path eRFPath) { int i; u8 ret = 0; switch (eRFPath) { case RF90_PATH_A: for (i = 0; i < RadioA_ArrayLength; i += 2) { if (Rtl819XRadioA_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioA_Array[i], bMask12Bits, Rtl819XRadioA_Array[i+1]); } break; case RF90_PATH_B: for (i = 0; i < RadioB_ArrayLength; i += 2) { if (Rtl819XRadioB_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioB_Array[i], bMask12Bits, Rtl819XRadioB_Array[i+1]); } break; case RF90_PATH_C: for (i = 0; i < RadioC_ArrayLength; i += 2) { if (Rtl819XRadioC_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioC_Array[i], bMask12Bits, Rtl819XRadioC_Array[i+1]); } break; case RF90_PATH_D: for (i = 0; i < RadioD_ArrayLength; i += 2) { if (Rtl819XRadioD_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioD_Array[i], bMask12Bits, Rtl819XRadioD_Array[i+1]); } break; default: break; } return ret; } static void 
rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u8 powerlevel = priv->TxPowerLevelCCK[channel-1]; u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip ID in rtl8192_SetTxPower" "Level()\n"); break; } return; } static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable, u32 CmdTableIdx, u32 CmdTableSz, enum sw_chnl_cmd_id CmdID, u32 Para1, u32 Para2, u32 msDelay) { struct sw_chnl_cmd *pCmd; if (CmdTable == NULL) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot " "be NULL.\n"); return false; } if (CmdTableIdx >= CmdTableSz) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid" " index, please check size of the table, CmdTableIdx:" "%d, CmdTableSz:%d\n", CmdTableIdx, CmdTableSz); return false; } pCmd = CmdTable + CmdTableIdx; pCmd->CmdID = CmdID; pCmd->Para1 = Para1; pCmd->Para2 = Para2; pCmd->msDelay = msDelay; return true; } static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage, u8 *step, u32 *delay) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; u32 PreCommonCmdCnt; u32 PostCommonCmdCnt; u32 RfDependCmdCnt; struct sw_chnl_cmd *CurrentCmd = NULL; u8 eRFPath; RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n", __func__, *stage, *step, channel); if (!rtllib_legal_channel(priv->rtllib, channel)) { RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel); return true; } { PreCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_SetTxPowerLevel, 0, 0, 0); rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_End, 0, 0, 0); PostCommonCmdCnt = 0; 
rtl8192_phy_SetSwChnlCmdArray(ieee->PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0); RfDependCmdCnt = 0; switch (priv->rf_chip) { case RF_8225: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra " "8225: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, RF_CHANNEL_TABLE_ZEBRA[channel], 10); rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8256: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra" " 8256: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, channel, 10); rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); return false; break; } do { switch (*stage) { case 0: CurrentCmd = &ieee->PreCommonCmd[*step]; break; case 1: CurrentCmd = &ieee->RfDependCmd[*step]; break; case 2: CurrentCmd = &ieee->PostCommonCmd[*step]; break; } if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) { if ((*stage) == 2) { return true; } else { (*stage)++; (*step) = 0; continue; } } if (!CurrentCmd) continue; switch (CurrentCmd->CmdID) { case CmdID_SetTxPowerLevel: if (priv->IC_Cut > (u8)VERSION_8190_BD) rtl8192_SetTxPowerLevel(dev, channel); break; case CmdID_WritePortUlong: write_nic_dword(dev, CurrentCmd->Para1, CurrentCmd->Para2); break; case CmdID_WritePortUshort: write_nic_word(dev, CurrentCmd->Para1, (u16)CurrentCmd->Para2); break; case CmdID_WritePortUchar: write_nic_byte(dev, CurrentCmd->Para1, (u8)CurrentCmd->Para2); break; case CmdID_RF_WriteReg: for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) rtl8192_phy_SetRFReg(dev, (enum 
rf90_radio_path)eRFPath, CurrentCmd->Para1, bMask12Bits, CurrentCmd->Para2<<7); break; default: break; } break; } while (true); } /*for (Number of RF paths)*/ (*delay) = CurrentCmd->msDelay; (*step)++; return false; } static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u32 delay = 0; while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &priv->SwChnlStage, &priv->SwChnlStep, &delay)) { if (delay > 0) msleep(delay); if (IS_NIC_DOWN(priv)) break; } } void rtl8192_SwChnl_WorkItem(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n"); RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __func__, priv->chan, priv); rtl8192_phy_FinishSwChnlNow(dev , priv->chan); RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n"); } u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_PHY, "=====>%s()\n", __func__); if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_ERR, "%s(): ERR !! 
driver is not up\n", __func__); return false; } if (priv->SwChnlInProgress) return false; switch (priv->rtllib->mode) { case WIRELESS_MODE_A: case WIRELESS_MODE_N_5G: if (channel <= 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14"); return false; } break; case WIRELESS_MODE_B: if (channel > 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14"); return false; } break; case WIRELESS_MODE_G: case WIRELESS_MODE_N_24G: if (channel > 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14"); return false; } break; } priv->SwChnlInProgress = true; if (channel == 0) channel = 1; priv->chan = channel; priv->SwChnlStage = 0; priv->SwChnlStep = 0; if (!IS_NIC_DOWN(priv)) rtl8192_SwChnl_WorkItem(dev); priv->SwChnlInProgress = false; return true; } static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; if (priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if (priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; RT_TRACE(COMP_POWER_TRACKING, "20M, priv->CCKPresent" "Attentuation = %d\n", priv->CCKPresentAttentuation); if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else { dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } break; case HT_CHANNEL_WIDTH_20_40: priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; RT_TRACE(COMP_POWER_TRACKING, "40M, priv->CCKPresent" "Attentuation = %d\n", priv->CCKPresentAttentuation); if 
(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength - 1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if (priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else { dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } break; } } static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) priv->bcck_in_ch14 = true; else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) priv->bcck_in_ch14 = false; switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: if (priv->Record_CCK_20Mindex == 0) priv->Record_CCK_20Mindex = 6; priv->CCK_index = priv->Record_CCK_20Mindex; RT_TRACE(COMP_POWER_TRACKING, "20MHz, CCK_Tx_Power_Track_BW_" "Switch_ThermalMeter(),CCK_index = %d\n", priv->CCK_index); break; case HT_CHANNEL_WIDTH_20_40: priv->CCK_index = priv->Record_CCK_40Mindex; RT_TRACE(COMP_POWER_TRACKING, "40MHz, CCK_Tx_Power_Track_BW_" "Switch_ThermalMeter(), CCK_index = %d\n", priv->CCK_index); break; } dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->IC_Cut >= IC_VersionCut_D) CCK_Tx_Power_Track_BW_Switch_TSSI(dev); else CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev); } void rtl8192_SetBWModeWorkItem(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u8 regBwOpMode; RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s " "bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ? 
"20MHz" : "40MHz") if (priv->rf_chip == RF_PSEUDO_11N) { priv->SetBWModeInProgress = false; return; } if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_ERR, "%s(): ERR!! driver is not up\n", __func__); return; } regBwOpMode = read_nic_byte(dev, BW_OPMODE); switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: regBwOpMode |= BW_OPMODE_20MHZ; write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; case HT_CHANNEL_WIDTH_20_40: regBwOpMode &= ~BW_OPMODE_20MHZ; write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown " "Bandwidth: %#X\n", priv->CurrentChannelBW); break; } switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0); if (!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000); write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317); write_nic_dword(dev, rCCK0_DebugPort, 0x00000204); } else { CCK_Tx_Power_Track_BW_Switch(dev); } rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1); break; case HT_CHANNEL_WIDTH_20_40: rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1); if (!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000); write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e); write_nic_dword(dev, rCCK0_DebugPort, 0x00000409); } else { CCK_Tx_Power_Track_BW_Switch(dev); } rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1)); rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown " "Bandwidth: %#X\n", priv->CurrentChannelBW); break; } switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW); break; case RF_8258: break; case RF_PSEUDO_11N: break; default: RT_TRACE(COMP_ERR, "Unknown 
RFChipID: %d\n", priv->rf_chip); break; } atomic_dec(&(priv->rtllib->atm_swbw)); priv->SetBWModeInProgress = false; RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()"); } void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth, enum ht_extchnl_offset Offset) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->SetBWModeInProgress) return; atomic_inc(&(priv->rtllib->atm_swbw)); priv->SetBWModeInProgress = true; priv->CurrentChannelBW = Bandwidth; if (Offset == HT_EXTCHNL_OFFSET_LOWER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER; else if (Offset == HT_EXTCHNL_OFFSET_UPPER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER; else priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE; rtl8192_SetBWModeWorkItem(dev); } void InitialGain819xPci(struct net_device *dev, u8 Operation) { #define SCAN_RX_INITIAL_GAIN 0x17 #define POWER_DETECTION_TH 0x08 struct r8192_priv *priv = rtllib_priv(dev); u32 BitMask; u8 initial_gain; if (!IS_NIC_DOWN(priv)) { switch (Operation) { case IG_Backup: RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial" " gain.\n"); initial_gain = SCAN_RX_INITIAL_GAIN; BitMask = bMaskByte0; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, BitMask); priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, BitMask); priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, BitMask); priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, BitMask); BitMask = bMaskByte2; priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, BitMask); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is" " %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is" " %x\n", priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is" " %x\n", 
priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is" " %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is" " %x\n", priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n", initial_gain); write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain); RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n", POWER_DETECTION_TH); write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH); break; case IG_Restore: RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial " "gain.\n"); BitMask = 0x7f; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1); rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1); rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1); rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1); BitMask = bMaskByte2; rtl8192_setBBreg(dev, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50" " is %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58" " is %x\n", priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60" " is %x\n", priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68" " is %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a" " is %x\n", priv->initgain_backup.cca); rtl8192_phy_setTxPower(dev, priv->rtllib->current_network.channel); if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); break; default: 
RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n"); break; } } } void PHY_SetRtl8192eRfOff(struct net_device *dev) { rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0); rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0); write_nic_byte(dev, ANAPAR_FOR_8192PciE, 0x07); } static bool SetRFPowerState8190(struct net_device *dev, enum rt_rf_power_state eRFPowerState) { struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); bool bResult = true; u8 i = 0, QueueID = 0; struct rtl8192_tx_ring *ring = NULL; if (priv->SetRFPowerStateInProgress == true) return false; RT_TRACE(COMP_PS, "===========> SetRFPowerState8190()!\n"); priv->SetRFPowerStateInProgress = true; switch (priv->rf_chip) { case RF_8256: switch (eRFPowerState) { case eRfOn: RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOn!\n"); if ((priv->rtllib->eRFPowerState == eRfOff) && RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus = true; u32 InitilizeCount = 3; do { InitilizeCount--; priv->RegRfOff = false; rtstatus = NicIFEnableNIC(dev); } while ((rtstatus != true) && (InitilizeCount > 0)); if (rtstatus != true) { RT_TRACE(COMP_ERR, "%s():Initialize Ada" "pter fail,return\n", __func__); priv->SetRFPowerStateInProgress = false; return false; } RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); } else { write_nic_byte(dev, ANAPAR, 0x37); mdelay(1); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x1); priv->bHwRfOffAction = 0; rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x3); rtl8192_setBBreg(dev, 
rOFDM0_TRxPathEnable, 0x3, 0x3); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3, 0x3); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x3); } break; case eRfSleep: if (priv->rtllib->eRFPowerState == eRfOff) break; for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) { ring = &priv->tx_ring[QueueID]; if (skb_queue_len(&ring->queue) == 0) { QueueID++; continue; } else { RT_TRACE((COMP_POWER|COMP_RF), "eRf Off" "/Sleep: %d times TcbBusyQueue" "[%d] !=0 before doze!\n", (i+1), QueueID); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! " "SetRFPowerState8190(): eRfOff" ": %d times TcbBusyQueue[%d] " "!= 0 !!!\n", MAX_DOZE_WAITING_TIMES_9x, QueueID); break; } } PHY_SetRtl8192eRfOff(dev); break; case eRfOff: RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOff/" "Sleep !\n"); for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) { ring = &priv->tx_ring[QueueID]; if (skb_queue_len(&ring->queue) == 0) { QueueID++; continue; } else { RT_TRACE(COMP_POWER, "eRf Off/Sleep: %d" " times TcbBusyQueue[%d] !=0 b" "efore doze!\n", (i+1), QueueID); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(COMP_POWER, "\n\n\n SetZebra: " "RFPowerState8185B(): eRfOff:" " %d times TcbBusyQueue[%d] " "!= 0 !!!\n", MAX_DOZE_WAITING_TIMES_9x, QueueID); break; } } if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC && !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { NicIFDisableNIC(dev); RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); } else if (!(pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC)) { PHY_SetRtl8192eRfOff(dev); } break; default: bResult = false; RT_TRACE(COMP_ERR, "SetRFPowerState8190(): unknow state" " to set: 0x%X!!!\n", eRFPowerState); break; } break; default: RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown RF type\n"); break; } if (bResult) { priv->rtllib->eRFPowerState = eRFPowerState; switch (priv->rf_chip) { case RF_8256: break; default: RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown " "RF type\n"); break; } } 
priv->SetRFPowerStateInProgress = false; RT_TRACE(COMP_PS, "<=========== SetRFPowerState8190() bResult = %d!\n", bResult); return bResult; } bool SetRFPowerState(struct net_device *dev, enum rt_rf_power_state eRFPowerState) { struct r8192_priv *priv = rtllib_priv(dev); bool bResult = false; RT_TRACE(COMP_PS, "---------> SetRFPowerState(): eRFPowerState(%d)\n", eRFPowerState); if (eRFPowerState == priv->rtllib->eRFPowerState && priv->bHwRfOffAction == 0) { RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): discard the " "request for eRFPowerState(%d) is the same.\n", eRFPowerState); return bResult; } bResult = SetRFPowerState8190(dev, eRFPowerState); RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): bResult(%d)\n", bResult); return bResult; } void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->up) { switch (Operation) { case SCAN_OPT_BACKUP: priv->rtllib->InitialGainHandler(dev, IG_Backup); break; case SCAN_OPT_RESTORE: priv->rtllib->InitialGainHandler(dev, IG_Restore); break; default: RT_TRACE(COMP_SCAN, "Unknown Scan Backup Operation.\n"); break; } } }
gpl-2.0
pablohaylan/I9500_Stock_Kernel_KK_4.4.2
arch/arm/plat-nomadik/timer.c
4899
6195
/* * linux/arch/arm/plat-nomadik/timer.c * * Copyright (C) 2008 STMicroelectronics * Copyright (C) 2010 Alessandro Rubini * Copyright (C) 2010 Linus Walleij for ST-Ericsson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/clockchips.h> #include <linux/clk.h> #include <linux/jiffies.h> #include <linux/err.h> #include <asm/mach/time.h> #include <asm/sched_clock.h> /* * The MTU device hosts four different counters, with 4 set of * registers. These are register names. */ #define MTU_IMSC 0x00 /* Interrupt mask set/clear */ #define MTU_RIS 0x04 /* Raw interrupt status */ #define MTU_MIS 0x08 /* Masked interrupt status */ #define MTU_ICR 0x0C /* Interrupt clear register */ /* per-timer registers take 0..3 as argument */ #define MTU_LR(x) (0x10 + 0x10 * (x) + 0x00) /* Load value */ #define MTU_VAL(x) (0x10 + 0x10 * (x) + 0x04) /* Current value */ #define MTU_CR(x) (0x10 + 0x10 * (x) + 0x08) /* Control reg */ #define MTU_BGLR(x) (0x10 + 0x10 * (x) + 0x0c) /* At next overflow */ /* bits for the control register */ #define MTU_CRn_ENA 0x80 #define MTU_CRn_PERIODIC 0x40 /* if 0 = free-running */ #define MTU_CRn_PRESCALE_MASK 0x0c #define MTU_CRn_PRESCALE_1 0x00 #define MTU_CRn_PRESCALE_16 0x04 #define MTU_CRn_PRESCALE_256 0x08 #define MTU_CRn_32BITS 0x02 #define MTU_CRn_ONESHOT 0x01 /* if 0 = wraps reloading from BGLR*/ /* Other registers are usual amba/primecell registers, currently not used */ #define MTU_ITCR 0xff0 #define MTU_ITOP 0xff4 #define MTU_PERIPH_ID0 0xfe0 #define MTU_PERIPH_ID1 0xfe4 #define MTU_PERIPH_ID2 0xfe8 #define MTU_PERIPH_ID3 0xfeC #define MTU_PCELL0 0xff0 #define MTU_PCELL1 0xff4 #define MTU_PCELL2 0xff8 #define MTU_PCELL3 0xffC static void __iomem *mtu_base; static bool clkevt_periodic; static u32 
clk_prescale; static u32 nmdk_cycle; /* write-once */ #ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK /* * Override the global weak sched_clock symbol with this * local implementation which uses the clocksource to get some * better resolution when scheduling the kernel. */ static u32 notrace nomadik_read_sched_clock(void) { if (unlikely(!mtu_base)) return 0; return -readl(mtu_base + MTU_VAL(0)); } #endif /* Clockevent device: use one-shot mode */ static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev) { writel(1 << 1, mtu_base + MTU_IMSC); writel(evt, mtu_base + MTU_LR(1)); /* Load highest value, enable device, enable interrupts */ writel(MTU_CRn_ONESHOT | clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA, mtu_base + MTU_CR(1)); return 0; } void nmdk_clkevt_reset(void) { if (clkevt_periodic) { /* Timer: configure load and background-load, and fire it up */ writel(nmdk_cycle, mtu_base + MTU_LR(1)); writel(nmdk_cycle, mtu_base + MTU_BGLR(1)); writel(MTU_CRn_PERIODIC | clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA, mtu_base + MTU_CR(1)); writel(1 << 1, mtu_base + MTU_IMSC); } else { /* Generate an interrupt to start the clockevent again */ (void) nmdk_clkevt_next(nmdk_cycle, NULL); } } static void nmdk_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: clkevt_periodic = true; nmdk_clkevt_reset(); break; case CLOCK_EVT_MODE_ONESHOT: clkevt_periodic = false; break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: writel(0, mtu_base + MTU_IMSC); /* disable timer */ writel(0, mtu_base + MTU_CR(1)); /* load some high default value */ writel(0xffffffff, mtu_base + MTU_LR(1)); break; case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device nmdk_clkevt = { .name = "mtu_1", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .rating = 200, .set_mode = nmdk_clkevt_mode, .set_next_event = nmdk_clkevt_next, }; /* * IRQ Handler for timer 1 of the MTU block. 
*/ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evdev = dev_id; writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */ evdev->event_handler(evdev); return IRQ_HANDLED; } static struct irqaction nmdk_timer_irq = { .name = "Nomadik Timer Tick", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = nmdk_timer_interrupt, .dev_id = &nmdk_clkevt, }; void nmdk_clksrc_reset(void) { /* Disable */ writel(0, mtu_base + MTU_CR(0)); /* ClockSource: configure load and background-load, and fire it up */ writel(nmdk_cycle, mtu_base + MTU_LR(0)); writel(nmdk_cycle, mtu_base + MTU_BGLR(0)); writel(clk_prescale | MTU_CRn_32BITS | MTU_CRn_ENA, mtu_base + MTU_CR(0)); } void __init nmdk_timer_init(void __iomem *base) { unsigned long rate; struct clk *clk0; mtu_base = base; clk0 = clk_get_sys("mtu0", NULL); BUG_ON(IS_ERR(clk0)); BUG_ON(clk_prepare(clk0) < 0); BUG_ON(clk_enable(clk0) < 0); /* * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz * for ux500. * Use a divide-by-16 counter if the tick rate is more than 32MHz. * At 32 MHz, the timer (with 32 bit counter) can be programmed * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer * with 16 gives too low timer resolution. */ rate = clk_get_rate(clk0); if (rate > 32000000) { rate /= 16; clk_prescale = MTU_CRn_PRESCALE_16; } else { clk_prescale = MTU_CRn_PRESCALE_1; } nmdk_cycle = (rate + HZ/2) / HZ; /* Timer 0 is the free running clocksource */ nmdk_clksrc_reset(); if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0", rate, 200, 32, clocksource_mmio_readl_down)) pr_err("timer: failed to initialize clock source %s\n", "mtu_0"); #ifdef CONFIG_NOMADIK_MTU_SCHED_CLOCK setup_sched_clock(nomadik_read_sched_clock, 32, rate); #endif /* Timer 1 is used for events, register irq and clockevents */ setup_irq(IRQ_MTU0, &nmdk_timer_irq); nmdk_clkevt.cpumask = cpumask_of(0); clockevents_config_and_register(&nmdk_clkevt, rate, 2, 0xffffffffU); }
gpl-2.0
embest-tech/linux-imx
drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
4899
45619
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #include "rtl_core.h" #include "r8192E_hw.h" #include "r8192E_phyreg.h" #include "r8190P_rtl8256.h" #include "r8192E_phy.h" #include "rtl_dm.h" #include "r8192E_hwimg.h" static u32 RF_CHANNEL_TABLE_ZEBRA[] = { 0, 0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c, 0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72, }; /*************************Define local function prototype**********************/ static u32 phy_FwRFSerialRead(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset); static void phy_FwRFSerialWrite(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset, u32 Data); static u32 rtl8192_CalculateBitShift(u32 dwBitMask) { u32 i; for (i = 0; i <= 31; i++) { if (((dwBitMask >> i) & 0x1) == 1) break; } return i; } u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath) { u8 ret = 1; struct r8192_priv *priv = rtllib_priv(dev); if (priv->rf_type == RF_2T4R) ret = 0; else if (priv->rf_type == RF_1T2R) { if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B) ret = 1; else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D) ret = 0; } return ret; } void rtl8192_setBBreg(struct net_device *dev, 
u32 dwRegAddr, u32 dwBitMask, u32 dwData) { u32 OriginalValue, BitShift, NewValue; if (dwBitMask != bMaskDWord) { OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = rtl8192_CalculateBitShift(dwBitMask); NewValue = (((OriginalValue) & (~dwBitMask)) | (dwData << BitShift)); write_nic_dword(dev, dwRegAddr, NewValue); } else write_nic_dword(dev, dwRegAddr, dwData); return; } u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask) { u32 Ret = 0, OriginalValue, BitShift; OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = rtl8192_CalculateBitShift(dwBitMask); Ret = (OriginalValue & dwBitMask) >> BitShift; return Ret; } static u32 rtl8192_phy_RFSerialRead(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset) { struct r8192_priv *priv = rtllib_priv(dev); u32 ret = 0; u32 NewOffset = 0; struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath]; Offset &= 0x3f; if (priv->rf_chip == RF_8256) { rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0); if (Offset >= 31) { priv->RfReg0Value[eRFPath] |= 0x140; rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16)); NewOffset = Offset - 30; } else if (Offset >= 16) { priv->RfReg0Value[eRFPath] |= 0x100; priv->RfReg0Value[eRFPath] &= (~0x40); rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16)); NewOffset = Offset - 15; } else NewOffset = Offset; } else { RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need" " to be 8256\n"); NewOffset = Offset; } rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress, NewOffset); rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0); rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1); mdelay(1); ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData); if (priv->rf_chip == RF_8256) { priv->RfReg0Value[eRFPath] &= 0xebf; rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); 
rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3); } return ret; } static void rtl8192_phy_RFSerialWrite(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset, u32 Data) { struct r8192_priv *priv = rtllib_priv(dev); u32 DataAndAddr = 0, NewOffset = 0; struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath]; Offset &= 0x3f; if (priv->rf_chip == RF_8256) { rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0); if (Offset >= 31) { priv->RfReg0Value[eRFPath] |= 0x140; rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); NewOffset = Offset - 30; } else if (Offset >= 16) { priv->RfReg0Value[eRFPath] |= 0x100; priv->RfReg0Value[eRFPath] &= (~0x40); rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); NewOffset = Offset - 15; } else NewOffset = Offset; } else { RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be" " 8256\n"); NewOffset = Offset; } DataAndAddr = (Data<<16) | (NewOffset&0x3f); rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr); if (Offset == 0x0) priv->RfReg0Value[eRFPath] = Data; if (priv->rf_chip == RF_8256) { if (Offset != 0) { priv->RfReg0Value[eRFPath] &= 0xebf; rtl8192_setBBreg( dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16)); } rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3); } return; } void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath, u32 RegAddr, u32 BitMask, u32 Data) { struct r8192_priv *priv = rtllib_priv(dev); u32 Original_Value, BitShift, New_Value; if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) return; if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter) return; RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n"); if (priv->Rf_Mode == RF_OP_By_FW) { if (BitMask != bMask12Bits) { Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); New_Value = 
(((Original_Value) & (~BitMask)) | (Data << BitShift)); phy_FwRFSerialWrite(dev, eRFPath, RegAddr, New_Value); } else phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data); udelay(200); } else { if (BitMask != bMask12Bits) { Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr); BitShift = rtl8192_CalculateBitShift(BitMask); New_Value = (((Original_Value) & (~BitMask)) | (Data << BitShift)); rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, New_Value); } else rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data); } return; } u32 rtl8192_phy_QueryRFReg(struct net_device *dev, enum rf90_radio_path eRFPath, u32 RegAddr, u32 BitMask) { u32 Original_Value, Readback_Value, BitShift; struct r8192_priv *priv = rtllib_priv(dev); if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) return 0; if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter) return 0; down(&priv->rf_sem); if (priv->Rf_Mode == RF_OP_By_FW) { Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr); udelay(200); } else { Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr); } BitShift = rtl8192_CalculateBitShift(BitMask); Readback_Value = (Original_Value & BitMask) >> BitShift; up(&priv->rf_sem); return Readback_Value; } static u32 phy_FwRFSerialRead(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset) { u32 retValue = 0; u32 Data = 0; u8 time = 0; Data |= ((Offset & 0xFF) << 12); Data |= ((eRFPath & 0x3) << 20); Data |= 0x80000000; while (read_nic_dword(dev, QPNR)&0x80000000) { if (time++ < 100) udelay(10); else break; } write_nic_dword(dev, QPNR, Data); while (read_nic_dword(dev, QPNR) & 0x80000000) { if (time++ < 100) udelay(10); else return 0; } retValue = read_nic_dword(dev, RF_DATA); return retValue; } /* phy_FwRFSerialRead */ static void phy_FwRFSerialWrite(struct net_device *dev, enum rf90_radio_path eRFPath, u32 Offset, u32 Data) { u8 time = 0; Data |= ((Offset & 0xFF) << 12); Data |= ((eRFPath & 0x3) << 20); Data |= 0x400000; Data |= 0x80000000; 
while (read_nic_dword(dev, QPNR) & 0x80000000) { if (time++ < 100) udelay(10); else break; } write_nic_dword(dev, QPNR, Data); } /* phy_FwRFSerialWrite */ void rtl8192_phy_configmac(struct net_device *dev) { u32 dwArrayLen = 0, i = 0; u32 *pdwArray = NULL; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bTXPowerDataReadFromEEPORM) { RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n"); dwArrayLen = MACPHY_Array_PGLength; pdwArray = Rtl819XMACPHY_Array_PG; } else { RT_TRACE(COMP_PHY, "Read rtl819XMACPHY_Array\n"); dwArrayLen = MACPHY_ArrayLength; pdwArray = Rtl819XMACPHY_Array; } for (i = 0; i < dwArrayLen; i += 3) { RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MAC" "PHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n", pdwArray[i], pdwArray[i+1], pdwArray[i+2]); if (pdwArray[i] == 0x318) pdwArray[i+2] = 0x00000800; rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1], pdwArray[i+2]); } return; } void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType) { int i; u32 *Rtl819XPHY_REGArray_Table = NULL; u32 *Rtl819XAGCTAB_Array_Table = NULL; u16 AGCTAB_ArrayLen, PHY_REGArrayLen = 0; struct r8192_priv *priv = rtllib_priv(dev); AGCTAB_ArrayLen = AGCTAB_ArrayLength; Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array; if (priv->rf_type == RF_2T4R) { PHY_REGArrayLen = PHY_REGArrayLength; Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArray; } else if (priv->rf_type == RF_1T2R) { PHY_REGArrayLen = PHY_REG_1T2RArrayLength; Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray; } if (ConfigType == BaseBand_Config_PHY_REG) { for (i = 0; i < PHY_REGArrayLen; i += 2) { rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i], bMaskDWord, Rtl819XPHY_REGArray_Table[i+1]); RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray" "[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n", i, Rtl819XPHY_REGArray_Table[i], Rtl819XPHY_REGArray_Table[i+1]); } } else if (ConfigType == BaseBand_Config_AGC_TAB) { for (i = 0; i < AGCTAB_ArrayLen; i += 2) { rtl8192_setBBreg(dev, 
Rtl819XAGCTAB_Array_Table[i], bMaskDWord, Rtl819XAGCTAB_Array_Table[i+1]); RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] " "is %x rtl819XAGCTAB_Array[1] is %x\n", i, Rtl819XAGCTAB_Array_Table[i], Rtl819XAGCTAB_Array_Table[i+1]); } } return; } static void rtl8192_InitBBRFRegDef(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW; priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB; priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE; priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter; priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter; priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter; priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter; priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter; priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter; priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = 
rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl; priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl; priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl; priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1; priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1; priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1; priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1; priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2; priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2; priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2; priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2; priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance; priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE; priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE; 
priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE; priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE; priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance; priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance; priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance; priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance; priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE; priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE; priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE; priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE; priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack; priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack; priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack; priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack; } bool rtl8192_phy_checkBBAndRF(struct net_device *dev, enum hw90_block CheckBlock, enum rf90_radio_path eRFPath) { bool ret = true; u32 i, CheckTimes = 4, dwRegRead = 0; u32 WriteAddr[4]; u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f}; WriteAddr[HW90_BLOCK_MAC] = 0x100; WriteAddr[HW90_BLOCK_PHY0] = 0x900; WriteAddr[HW90_BLOCK_PHY1] = 0x800; WriteAddr[HW90_BLOCK_RF] = 0x3; RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __func__, CheckBlock); for (i = 0; i < CheckTimes; i++) { switch (CheckBlock) { case HW90_BLOCK_MAC: RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write " "0x100 here!"); break; case HW90_BLOCK_PHY0: case HW90_BLOCK_PHY1: write_nic_dword(dev, WriteAddr[CheckBlock], WriteData[i]); dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]); break; case HW90_BLOCK_RF: WriteData[i] &= 0xfff; rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]); mdelay(10); dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMaskDWord); mdelay(10); break; default: ret = false; break; } if (dwRegRead != WriteData[i]) { 
RT_TRACE(COMP_ERR, "====>error=====dwRegRead: %x, " "WriteData: %x\n", dwRegRead, WriteData[i]); ret = false; break; } } return ret; } static bool rtl8192_BB_Config_ParaFile(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); bool rtStatus = true; u8 bRegValue = 0, eCheckItem = 0; u32 dwRegValue = 0; bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET); write_nic_byte(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT)); dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST))); for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0; eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) { rtStatus = rtl8192_phy_checkBBAndRF(dev, (enum hw90_block)eCheckItem, (enum rf90_radio_path)0); if (rtStatus != true) { RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():" "Check PHY%d Fail!!\n", eCheckItem-1); return rtStatus; } } rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0); rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG); dwRegValue = read_nic_dword(dev, CPU_GEN); write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST)); rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB); if (priv->IC_Cut > VERSION_8190_BD) { if (priv->rf_type == RF_2T4R) dwRegValue = (priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]); else dwRegValue = 0x0; rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue); dwRegValue = priv->CrystalCap; rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x, dwRegValue); } return rtStatus; } bool rtl8192_BBConfig(struct net_device *dev) { bool rtStatus = true; rtl8192_InitBBRFRegDef(dev); rtStatus = rtl8192_BB_Config_ParaFile(dev); return rtStatus; } void rtl8192_phy_getTxPower(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); priv->MCSTxPowerLevelOriginalOffset[0] = read_nic_dword(dev, rTxAGC_Rate18_06); priv->MCSTxPowerLevelOriginalOffset[1] = read_nic_dword(dev, rTxAGC_Rate54_24); 
priv->MCSTxPowerLevelOriginalOffset[2] = read_nic_dword(dev, rTxAGC_Mcs03_Mcs00); priv->MCSTxPowerLevelOriginalOffset[3] = read_nic_dword(dev, rTxAGC_Mcs07_Mcs04); priv->MCSTxPowerLevelOriginalOffset[4] = read_nic_dword(dev, rTxAGC_Mcs11_Mcs08); priv->MCSTxPowerLevelOriginalOffset[5] = read_nic_dword(dev, rTxAGC_Mcs15_Mcs12); priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1); priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1); priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1); priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1); RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, " "c60=0x%x, c68=0x%x)\n", priv->DefaultInitialGain[0], priv->DefaultInitialGain[1], priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]); priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3); priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2); RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n", rOFDM0_RxDetector3, priv->framesync); priv->SifsTime = read_nic_word(dev, SIFS); return; } void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u8 powerlevel = 0, powerlevelOFDM24G = 0; char ant_pwr_diff; u32 u4RegValue; if (priv->epromtype == EEPROM_93C46) { powerlevel = priv->TxPowerLevelCCK[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; } else if (priv->epromtype == EEPROM_93C56) { if (priv->rf_type == RF_1T2R) { powerlevel = priv->TxPowerLevelCCK_C[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1]; } else if (priv->rf_type == RF_2T4R) { powerlevel = priv->TxPowerLevelCCK_A[channel-1]; powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1]; ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1] - priv->TxPowerLevelOFDM24G_A[channel-1]; priv->RF_C_TxPwDiff = ant_pwr_diff; ant_pwr_diff &= 0xf; priv->AntennaTxPwDiff[2] = 0; priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff); priv->AntennaTxPwDiff[0] 
= 0; u4RegValue = (priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]); rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue); } } switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip in funtion %s()\n", __func__); break; } return; } bool rtl8192_phy_RFConfig(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); bool rtStatus = true; switch (priv->rf_chip) { case RF_8225: break; case RF_8256: rtStatus = PHY_RF8256_Config(dev); break; case RF_8258: break; case RF_PSEUDO_11N: break; default: RT_TRACE(COMP_ERR, "error chip id\n"); break; } return rtStatus; } void rtl8192_phy_updateInitGain(struct net_device *dev) { return; } u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev, enum rf90_radio_path eRFPath) { int i; u8 ret = 0; switch (eRFPath) { case RF90_PATH_A: for (i = 0; i < RadioA_ArrayLength; i += 2) { if (Rtl819XRadioA_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioA_Array[i], bMask12Bits, Rtl819XRadioA_Array[i+1]); } break; case RF90_PATH_B: for (i = 0; i < RadioB_ArrayLength; i += 2) { if (Rtl819XRadioB_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioB_Array[i], bMask12Bits, Rtl819XRadioB_Array[i+1]); } break; case RF90_PATH_C: for (i = 0; i < RadioC_ArrayLength; i += 2) { if (Rtl819XRadioC_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioC_Array[i], bMask12Bits, Rtl819XRadioC_Array[i+1]); } break; case RF90_PATH_D: for (i = 0; i < RadioD_ArrayLength; i += 2) { if (Rtl819XRadioD_Array[i] == 0xfe) { msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioD_Array[i], bMask12Bits, Rtl819XRadioD_Array[i+1]); } break; default: break; } return ret; } static void 
rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u8 powerlevel = priv->TxPowerLevelCCK[channel-1]; u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip ID in rtl8192_SetTxPower" "Level()\n"); break; } return; } static u8 rtl8192_phy_SetSwChnlCmdArray(struct sw_chnl_cmd *CmdTable, u32 CmdTableIdx, u32 CmdTableSz, enum sw_chnl_cmd_id CmdID, u32 Para1, u32 Para2, u32 msDelay) { struct sw_chnl_cmd *pCmd; if (CmdTable == NULL) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot " "be NULL.\n"); return false; } if (CmdTableIdx >= CmdTableSz) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid" " index, please check size of the table, CmdTableIdx:" "%d, CmdTableSz:%d\n", CmdTableIdx, CmdTableSz); return false; } pCmd = CmdTable + CmdTableIdx; pCmd->CmdID = CmdID; pCmd->Para1 = Para1; pCmd->Para2 = Para2; pCmd->msDelay = msDelay; return true; } static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage, u8 *step, u32 *delay) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; u32 PreCommonCmdCnt; u32 PostCommonCmdCnt; u32 RfDependCmdCnt; struct sw_chnl_cmd *CurrentCmd = NULL; u8 eRFPath; RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n", __func__, *stage, *step, channel); if (!rtllib_legal_channel(priv->rtllib, channel)) { RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel); return true; } { PreCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_SetTxPowerLevel, 0, 0, 0); rtl8192_phy_SetSwChnlCmdArray(ieee->PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_End, 0, 0, 0); PostCommonCmdCnt = 0; 
rtl8192_phy_SetSwChnlCmdArray(ieee->PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0); RfDependCmdCnt = 0; switch (priv->rf_chip) { case RF_8225: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra " "8225: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, RF_CHANNEL_TABLE_ZEBRA[channel], 10); rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8256: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra" " 8256: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, channel, 10); rtl8192_phy_SetSwChnlCmdArray(ieee->RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); return false; break; } do { switch (*stage) { case 0: CurrentCmd = &ieee->PreCommonCmd[*step]; break; case 1: CurrentCmd = &ieee->RfDependCmd[*step]; break; case 2: CurrentCmd = &ieee->PostCommonCmd[*step]; break; } if (CurrentCmd && CurrentCmd->CmdID == CmdID_End) { if ((*stage) == 2) { return true; } else { (*stage)++; (*step) = 0; continue; } } if (!CurrentCmd) continue; switch (CurrentCmd->CmdID) { case CmdID_SetTxPowerLevel: if (priv->IC_Cut > (u8)VERSION_8190_BD) rtl8192_SetTxPowerLevel(dev, channel); break; case CmdID_WritePortUlong: write_nic_dword(dev, CurrentCmd->Para1, CurrentCmd->Para2); break; case CmdID_WritePortUshort: write_nic_word(dev, CurrentCmd->Para1, (u16)CurrentCmd->Para2); break; case CmdID_WritePortUchar: write_nic_byte(dev, CurrentCmd->Para1, (u8)CurrentCmd->Para2); break; case CmdID_RF_WriteReg: for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) rtl8192_phy_SetRFReg(dev, (enum 
rf90_radio_path)eRFPath, CurrentCmd->Para1, bMask12Bits, CurrentCmd->Para2<<7); break; default: break; } break; } while (true); } /*for (Number of RF paths)*/ (*delay) = CurrentCmd->msDelay; (*step)++; return false; } static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); u32 delay = 0; while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &priv->SwChnlStage, &priv->SwChnlStep, &delay)) { if (delay > 0) msleep(delay); if (IS_NIC_DOWN(priv)) break; } } void rtl8192_SwChnl_WorkItem(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n"); RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __func__, priv->chan, priv); rtl8192_phy_FinishSwChnlNow(dev , priv->chan); RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n"); } u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel) { struct r8192_priv *priv = rtllib_priv(dev); RT_TRACE(COMP_PHY, "=====>%s()\n", __func__); if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_ERR, "%s(): ERR !! 
driver is not up\n", __func__); return false; } if (priv->SwChnlInProgress) return false; switch (priv->rtllib->mode) { case WIRELESS_MODE_A: case WIRELESS_MODE_N_5G: if (channel <= 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14"); return false; } break; case WIRELESS_MODE_B: if (channel > 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14"); return false; } break; case WIRELESS_MODE_G: case WIRELESS_MODE_N_24G: if (channel > 14) { RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14"); return false; } break; } priv->SwChnlInProgress = true; if (channel == 0) channel = 1; priv->chan = channel; priv->SwChnlStage = 0; priv->SwChnlStep = 0; if (!IS_NIC_DOWN(priv)) rtl8192_SwChnl_WorkItem(dev); priv->SwChnlInProgress = false; return true; } static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; if (priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if (priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; RT_TRACE(COMP_POWER_TRACKING, "20M, priv->CCKPresent" "Attentuation = %d\n", priv->CCKPresentAttentuation); if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else { dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } break; case HT_CHANNEL_WIDTH_20_40: priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; RT_TRACE(COMP_POWER_TRACKING, "40M, priv->CCKPresent" "Attentuation = %d\n", priv->CCKPresentAttentuation); if 
(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength - 1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if (priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = true; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = false; dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } else { dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } break; } } static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->rtllib->current_network.channel == 14 && !priv->bcck_in_ch14) priv->bcck_in_ch14 = true; else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) priv->bcck_in_ch14 = false; switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: if (priv->Record_CCK_20Mindex == 0) priv->Record_CCK_20Mindex = 6; priv->CCK_index = priv->Record_CCK_20Mindex; RT_TRACE(COMP_POWER_TRACKING, "20MHz, CCK_Tx_Power_Track_BW_" "Switch_ThermalMeter(),CCK_index = %d\n", priv->CCK_index); break; case HT_CHANNEL_WIDTH_20_40: priv->CCK_index = priv->Record_CCK_40Mindex; RT_TRACE(COMP_POWER_TRACKING, "40MHz, CCK_Tx_Power_Track_BW_" "Switch_ThermalMeter(), CCK_index = %d\n", priv->CCK_index); break; } dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->IC_Cut >= IC_VersionCut_D) CCK_Tx_Power_Track_BW_Switch_TSSI(dev); else CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev); } void rtl8192_SetBWModeWorkItem(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); u8 regBwOpMode; RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s " "bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ? 
"20MHz" : "40MHz") if (priv->rf_chip == RF_PSEUDO_11N) { priv->SetBWModeInProgress = false; return; } if (IS_NIC_DOWN(priv)) { RT_TRACE(COMP_ERR, "%s(): ERR!! driver is not up\n", __func__); return; } regBwOpMode = read_nic_byte(dev, BW_OPMODE); switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: regBwOpMode |= BW_OPMODE_20MHZ; write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; case HT_CHANNEL_WIDTH_20_40: regBwOpMode &= ~BW_OPMODE_20MHZ; write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown " "Bandwidth: %#X\n", priv->CurrentChannelBW); break; } switch (priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0); if (!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000); write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317); write_nic_dword(dev, rCCK0_DebugPort, 0x00000204); } else { CCK_Tx_Power_Track_BW_Switch(dev); } rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1); break; case HT_CHANNEL_WIDTH_20_40: rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1); if (!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000); write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e); write_nic_dword(dev, rCCK0_DebugPort, 0x00000409); } else { CCK_Tx_Power_Track_BW_Switch(dev); } rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1)); rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown " "Bandwidth: %#X\n", priv->CurrentChannelBW); break; } switch (priv->rf_chip) { case RF_8225: break; case RF_8256: PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW); break; case RF_8258: break; case RF_PSEUDO_11N: break; default: RT_TRACE(COMP_ERR, "Unknown 
RFChipID: %d\n", priv->rf_chip); break; } atomic_dec(&(priv->rtllib->atm_swbw)); priv->SetBWModeInProgress = false; RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()"); } void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth, enum ht_extchnl_offset Offset) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->SetBWModeInProgress) return; atomic_inc(&(priv->rtllib->atm_swbw)); priv->SetBWModeInProgress = true; priv->CurrentChannelBW = Bandwidth; if (Offset == HT_EXTCHNL_OFFSET_LOWER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER; else if (Offset == HT_EXTCHNL_OFFSET_UPPER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER; else priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE; rtl8192_SetBWModeWorkItem(dev); } void InitialGain819xPci(struct net_device *dev, u8 Operation) { #define SCAN_RX_INITIAL_GAIN 0x17 #define POWER_DETECTION_TH 0x08 struct r8192_priv *priv = rtllib_priv(dev); u32 BitMask; u8 initial_gain; if (!IS_NIC_DOWN(priv)) { switch (Operation) { case IG_Backup: RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial" " gain.\n"); initial_gain = SCAN_RX_INITIAL_GAIN; BitMask = bMaskByte0; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, BitMask); priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, BitMask); priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, BitMask); priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, BitMask); BitMask = bMaskByte2; priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, BitMask); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is" " %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is" " %x\n", priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is" " %x\n", 
priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is" " %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is" " %x\n", priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n", initial_gain); write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain); RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n", POWER_DETECTION_TH); write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH); break; case IG_Restore: RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial " "gain.\n"); BitMask = 0x7f; if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1); rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1); rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1); rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1); BitMask = bMaskByte2; rtl8192_setBBreg(dev, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50" " is %x\n", priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58" " is %x\n", priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60" " is %x\n", priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68" " is %x\n", priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a" " is %x\n", priv->initgain_backup.cca); rtl8192_phy_setTxPower(dev, priv->rtllib->current_network.channel); if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); break; default: 
RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n"); break; } } } void PHY_SetRtl8192eRfOff(struct net_device *dev) { rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0); rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0); write_nic_byte(dev, ANAPAR_FOR_8192PciE, 0x07); } static bool SetRFPowerState8190(struct net_device *dev, enum rt_rf_power_state eRFPowerState) { struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); bool bResult = true; u8 i = 0, QueueID = 0; struct rtl8192_tx_ring *ring = NULL; if (priv->SetRFPowerStateInProgress == true) return false; RT_TRACE(COMP_PS, "===========> SetRFPowerState8190()!\n"); priv->SetRFPowerStateInProgress = true; switch (priv->rf_chip) { case RF_8256: switch (eRFPowerState) { case eRfOn: RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOn!\n"); if ((priv->rtllib->eRFPowerState == eRfOff) && RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus = true; u32 InitilizeCount = 3; do { InitilizeCount--; priv->RegRfOff = false; rtstatus = NicIFEnableNIC(dev); } while ((rtstatus != true) && (InitilizeCount > 0)); if (rtstatus != true) { RT_TRACE(COMP_ERR, "%s():Initialize Ada" "pter fail,return\n", __func__); priv->SetRFPowerStateInProgress = false; return false; } RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); } else { write_nic_byte(dev, ANAPAR, 0x37); mdelay(1); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x1); priv->bHwRfOffAction = 0; rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x3); rtl8192_setBBreg(dev, 
rOFDM0_TRxPathEnable, 0x3, 0x3); rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3, 0x3); rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x3); } break; case eRfSleep: if (priv->rtllib->eRFPowerState == eRfOff) break; for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) { ring = &priv->tx_ring[QueueID]; if (skb_queue_len(&ring->queue) == 0) { QueueID++; continue; } else { RT_TRACE((COMP_POWER|COMP_RF), "eRf Off" "/Sleep: %d times TcbBusyQueue" "[%d] !=0 before doze!\n", (i+1), QueueID); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! " "SetRFPowerState8190(): eRfOff" ": %d times TcbBusyQueue[%d] " "!= 0 !!!\n", MAX_DOZE_WAITING_TIMES_9x, QueueID); break; } } PHY_SetRtl8192eRfOff(dev); break; case eRfOff: RT_TRACE(COMP_PS, "SetRFPowerState8190() eRfOff/" "Sleep !\n"); for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) { ring = &priv->tx_ring[QueueID]; if (skb_queue_len(&ring->queue) == 0) { QueueID++; continue; } else { RT_TRACE(COMP_POWER, "eRf Off/Sleep: %d" " times TcbBusyQueue[%d] !=0 b" "efore doze!\n", (i+1), QueueID); udelay(10); i++; } if (i >= MAX_DOZE_WAITING_TIMES_9x) { RT_TRACE(COMP_POWER, "\n\n\n SetZebra: " "RFPowerState8185B(): eRfOff:" " %d times TcbBusyQueue[%d] " "!= 0 !!!\n", MAX_DOZE_WAITING_TIMES_9x, QueueID); break; } } if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC && !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) { NicIFDisableNIC(dev); RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC); } else if (!(pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC)) { PHY_SetRtl8192eRfOff(dev); } break; default: bResult = false; RT_TRACE(COMP_ERR, "SetRFPowerState8190(): unknow state" " to set: 0x%X!!!\n", eRFPowerState); break; } break; default: RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown RF type\n"); break; } if (bResult) { priv->rtllib->eRFPowerState = eRFPowerState; switch (priv->rf_chip) { case RF_8256: break; default: RT_TRACE(COMP_ERR, "SetRFPowerState8190(): Unknown " "RF type\n"); break; } } 
priv->SetRFPowerStateInProgress = false; RT_TRACE(COMP_PS, "<=========== SetRFPowerState8190() bResult = %d!\n", bResult); return bResult; } bool SetRFPowerState(struct net_device *dev, enum rt_rf_power_state eRFPowerState) { struct r8192_priv *priv = rtllib_priv(dev); bool bResult = false; RT_TRACE(COMP_PS, "---------> SetRFPowerState(): eRFPowerState(%d)\n", eRFPowerState); if (eRFPowerState == priv->rtllib->eRFPowerState && priv->bHwRfOffAction == 0) { RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): discard the " "request for eRFPowerState(%d) is the same.\n", eRFPowerState); return bResult; } bResult = SetRFPowerState8190(dev, eRFPowerState); RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): bResult(%d)\n", bResult); return bResult; } void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation) { struct r8192_priv *priv = rtllib_priv(dev); if (priv->up) { switch (Operation) { case SCAN_OPT_BACKUP: priv->rtllib->InitialGainHandler(dev, IG_Backup); break; case SCAN_OPT_RESTORE: priv->rtllib->InitialGainHandler(dev, IG_Restore); break; default: RT_TRACE(COMP_SCAN, "Unknown Scan Backup Operation.\n"); break; } } }
gpl-2.0
raden/cempaka-kernel
drivers/media/video/cx231xx/cx231xx-core.c
4899
44940
/* cx231xx-core.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "cx231xx.h" #include "cx231xx-reg.h" /* #define ENABLE_DEBUG_ISOC_FRAMES */ static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); #define cx231xx_coredbg(fmt, arg...) do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) static unsigned int reg_debug; module_param(reg_debug, int, 0644); MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]"); static int alt = CX231XX_PINOUT; module_param(alt, int, 0644); MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint"); #define cx231xx_isocdbg(fmt, arg...) 
do {\ if (core_debug) \ printk(KERN_INFO "%s %s :"fmt, \ dev->name, __func__ , ##arg); } while (0) /***************************************************************** * Device control list functions * ******************************************************************/ LIST_HEAD(cx231xx_devlist); static DEFINE_MUTEX(cx231xx_devlist_mutex); /* * cx231xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void cx231xx_remove_from_devlist(struct cx231xx *dev) { if (dev == NULL) return; if (dev->udev == NULL) return; if (atomic_read(&dev->devlist_count) > 0) { mutex_lock(&cx231xx_devlist_mutex); list_del(&dev->devlist); atomic_dec(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); } }; void cx231xx_add_into_devlist(struct cx231xx *dev) { mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&dev->devlist, &cx231xx_devlist); atomic_inc(&dev->devlist_count); mutex_unlock(&cx231xx_devlist_mutex); }; static LIST_HEAD(cx231xx_extension_devlist); int cx231xx_register_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_add_tail(&ops->next, &cx231xx_extension_devlist); list_for_each_entry(dev, &cx231xx_devlist, devlist) ops->init(dev); printk(KERN_INFO DRIVER_NAME ": %s initialized\n", ops->name); mutex_unlock(&cx231xx_devlist_mutex); return 0; } EXPORT_SYMBOL(cx231xx_register_extension); void cx231xx_unregister_extension(struct cx231xx_ops *ops) { struct cx231xx *dev = NULL; mutex_lock(&cx231xx_devlist_mutex); list_for_each_entry(dev, &cx231xx_devlist, devlist) ops->fini(dev); printk(KERN_INFO DRIVER_NAME ": %s removed\n", ops->name); list_del(&ops->next); mutex_unlock(&cx231xx_devlist_mutex); } EXPORT_SYMBOL(cx231xx_unregister_extension); void cx231xx_init_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); if (!list_empty(&cx231xx_extension_devlist)) { list_for_each_entry(ops, 
&cx231xx_extension_devlist, next) { if (ops->init) ops->init(dev); } } mutex_unlock(&cx231xx_devlist_mutex); } void cx231xx_close_extension(struct cx231xx *dev) { struct cx231xx_ops *ops = NULL; mutex_lock(&cx231xx_devlist_mutex); if (!list_empty(&cx231xx_extension_devlist)) { list_for_each_entry(ops, &cx231xx_extension_devlist, next) { if (ops->fini) ops->fini(dev); } } mutex_unlock(&cx231xx_devlist_mutex); } /**************************************************************** * U S B related functions * *****************************************************************/ int cx231xx_send_usb_command(struct cx231xx_i2c *i2c_bus, struct cx231xx_i2c_xfer_data *req_data) { int status = 0; struct cx231xx *dev = i2c_bus->dev; struct VENDOR_REQUEST_IN ven_req; u8 saddr_len = 0; u8 _i2c_period = 0; u8 _i2c_nostop = 0; u8 _i2c_reserve = 0; if (dev->state & DEV_DISCONNECTED) return -ENODEV; /* Get the I2C period, nostop and reserve parameters */ _i2c_period = i2c_bus->i2c_period; _i2c_nostop = i2c_bus->i2c_nostop; _i2c_reserve = i2c_bus->i2c_reserve; saddr_len = req_data->saddr_len; /* Set wValue */ if (saddr_len == 1) /* need check saddr_len == 0 */ ven_req.wValue = req_data-> dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 | _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6; else ven_req.wValue = req_data-> dev_addr << 9 | _i2c_period << 4 | saddr_len << 2 | _i2c_nostop << 1 | I2C_SYNC | _i2c_reserve << 6; /* set channel number */ if (req_data->direction & I2C_M_RD) { /* channel number, for read,spec required channel_num +4 */ ven_req.bRequest = i2c_bus->nr + 4; } else ven_req.bRequest = i2c_bus->nr; /* channel number, */ /* set index value */ switch (saddr_len) { case 0: ven_req.wIndex = 0; /* need check */ break; case 1: ven_req.wIndex = (req_data->saddr_dat & 0xff); break; case 2: ven_req.wIndex = req_data->saddr_dat; break; } /* set wLength value */ ven_req.wLength = req_data->buf_size; /* set bData value */ ven_req.bData = 0; /* set the direction */ if 
(req_data->direction) { ven_req.direction = USB_DIR_IN; memset(req_data->p_buffer, 0x00, ven_req.wLength); } else ven_req.direction = USB_DIR_OUT; /* set the buffer for read / write */ ven_req.pBuff = req_data->p_buffer; /* call common vendor command request */ status = cx231xx_send_vendor_cmd(dev, &ven_req); if (status < 0) { cx231xx_info ("UsbInterface::sendCommand, failed with status -%d\n", status); } return status; } EXPORT_SYMBOL_GPL(cx231xx_send_usb_command); /* * Sends/Receives URB control messages, assuring to use a kalloced buffer * for all operations (dev->urb_buf), to avoid using stacked buffers, as * they aren't safe for usage with USB, due to DMA restrictions. * Also implements the debug code for control URB's. */ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { int rc, i; if (reg_debug) { printk(KERN_DEBUG "%s: (pipe 0x%08x): " "%s: %02x %02x %02x %02x %02x %02x %02x %02x ", dev->name, pipe, (requesttype & USB_DIR_IN) ? 
"IN" : "OUT", requesttype, request, value & 0xff, value >> 8, index & 0xff, index >> 8, size & 0xff, size >> 8); if (!(requesttype & USB_DIR_IN)) { printk(KERN_CONT ">>>"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } } /* Do the real call to usb_control_msg */ mutex_lock(&dev->ctrl_urb_lock); if (!(requesttype & USB_DIR_IN) && size) memcpy(dev->urb_buf, data, size); rc = usb_control_msg(dev->udev, pipe, request, requesttype, value, index, dev->urb_buf, size, timeout); if ((requesttype & USB_DIR_IN) && size) memcpy(data, dev->urb_buf, size); mutex_unlock(&dev->ctrl_urb_lock); if (reg_debug) { if (unlikely(rc < 0)) { printk(KERN_CONT "FAILED!\n"); return rc; } if ((requesttype & USB_DIR_IN)) { printk(KERN_CONT "<<<"); for (i = 0; i < size; i++) printk(KERN_CONT " %02x", ((unsigned char *)data)[i]); } printk(KERN_CONT "\n"); } return rc; } /* * cx231xx_read_ctrl_reg() * reads data from the usb device specifying bRequest and wValue */ int cx231xx_read_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_rcvctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if (len > URB_MAX_CTRL_SIZE) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; ret = __usb_control_msg(dev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } int cx231xx_send_vendor_cmd(struct cx231xx *dev, struct VENDOR_REQUEST_IN *ven_req) { int ret; int pipe = 0; int unsend_size = 0; u8 *pdata; if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((ven_req->wLength > URB_MAX_CTRL_SIZE)) return -EINVAL; if (ven_req->direction) pipe = usb_rcvctrlpipe(dev->udev, 0); else pipe = usb_sndctrlpipe(dev->udev, 0); /* * If the cx23102 read more than 4 
bytes with i2c bus, * need chop to 4 byte per request */ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) || (ven_req->bRequest == 0x5) || (ven_req->bRequest == 0x6))) { unsend_size = 0; pdata = ven_req->pBuff; unsend_size = ven_req->wLength; /* the first package */ ven_req->wValue = ven_req->wValue & 0xFFFB; ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x2; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; /* the middle package */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x42; while (unsend_size - 4 > 0) { pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, 0x0004, HZ); unsend_size = unsend_size - 4; } /* the last package */ ven_req->wValue = (ven_req->wValue & 0xFFBD) | 0x40; pdata = pdata + 4; ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, pdata, unsend_size, HZ); } else { ret = __usb_control_msg(dev, pipe, ven_req->bRequest, ven_req->direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE, ven_req->wValue, ven_req->wIndex, ven_req->pBuff, ven_req->wLength, HZ); } return ret; } /* * cx231xx_write_ctrl_reg() * sends data to the usb device, specifying bRequest */ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf, int len) { u8 val = 0; int ret; int pipe = usb_sndctrlpipe(dev->udev, 0); if (dev->state & DEV_DISCONNECTED) return -ENODEV; if ((len < 1) || (len > URB_MAX_CTRL_SIZE)) return -EINVAL; switch (len) { case 1: val = ENABLE_ONE_BYTE; break; case 2: val = ENABLE_TWE_BYTE; break; case 3: val = ENABLE_THREE_BYTE; break; case 4: val = ENABLE_FOUR_BYTE; break; default: val = 0xFF; /* invalid option */ } if (val == 0xFF) return -EINVAL; if (reg_debug) { int byte; 
cx231xx_isocdbg("(pipe 0x%08x): " "OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>", pipe, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, req, 0, val, reg & 0xff, reg >> 8, len & 0xff, len >> 8); for (byte = 0; byte < len; byte++) cx231xx_isocdbg(" %02x", (unsigned char)buf[byte]); cx231xx_isocdbg("\n"); } ret = __usb_control_msg(dev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, reg, buf, len, HZ); return ret; } /**************************************************************** * USB Alternate Setting functions * *****************************************************************/ int cx231xx_set_video_alternate(struct cx231xx *dev) { int errCode, prev_alt = dev->video_mode.alt; unsigned int min_pkt_size = dev->width * 2 + 4; u32 usb_interface_index = 0; /* When image size is bigger than a certain value, the frame size should be increased, otherwise, only green screen will be received. */ if (dev->width * 2 * dev->height > 720 * 240 * 2) min_pkt_size *= 2; if (dev->width > 360) { /* resolutions: 720,704,640 */ dev->video_mode.alt = 3; } else if (dev->width > 180) { /* resolutions: 360,352,320,240 */ dev->video_mode.alt = 2; } else if (dev->width > 0) { /* resolutions: 180,176,160,128,88 */ dev->video_mode.alt = 1; } else { /* Change to alt0 BULK to release USB bandwidth */ dev->video_mode.alt = 0; } if (dev->USE_ISO == 0) dev->video_mode.alt = 0; cx231xx_coredbg("dev->video_mode.alt= %d\n", dev->video_mode.alt); /* Get the correct video interface Index */ usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. 
video_index + 1; if (dev->video_mode.alt != prev_alt) { cx231xx_coredbg("minimum isoc packet size: %u (alt=%d)\n", min_pkt_size, dev->video_mode.alt); if (dev->video_mode.alt_max_pkt_size != NULL) dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode.alt]; cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n", dev->video_mode.alt, dev->video_mode.max_pkt_size); errCode = usb_set_interface(dev->udev, usb_interface_index, dev->video_mode.alt); if (errCode < 0) { cx231xx_errdev ("cannot change alt number to %d (error=%i)\n", dev->video_mode.alt, errCode); return errCode; } } return 0; } int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt) { int status = 0; u32 usb_interface_index = 0; u32 max_pkt_size = 0; switch (index) { case INDEX_TS1: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts1_index + 1; dev->ts1_mode.alt = alt; if (dev->ts1_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->ts1_mode.max_pkt_size = dev->ts1_mode.alt_max_pkt_size[dev->ts1_mode.alt]; break; case INDEX_TS2: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. ts2_index + 1; break; case INDEX_AUDIO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. audio_index + 1; dev->adev.alt = alt; if (dev->adev.alt_max_pkt_size != NULL) max_pkt_size = dev->adev.max_pkt_size = dev->adev.alt_max_pkt_size[dev->adev.alt]; break; case INDEX_VIDEO: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. video_index + 1; dev->video_mode.alt = alt; if (dev->video_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->video_mode.max_pkt_size = dev->video_mode.alt_max_pkt_size[dev->video_mode. alt]; break; case INDEX_VANC: if (dev->board.no_alt_vanc) return 0; usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. 
vanc_index + 1; dev->vbi_mode.alt = alt; if (dev->vbi_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->vbi_mode.max_pkt_size = dev->vbi_mode.alt_max_pkt_size[dev->vbi_mode.alt]; break; case INDEX_HANC: usb_interface_index = dev->current_pcb_config.hs_config_info[0].interface_info. hanc_index + 1; dev->sliced_cc_mode.alt = alt; if (dev->sliced_cc_mode.alt_max_pkt_size != NULL) max_pkt_size = dev->sliced_cc_mode.max_pkt_size = dev->sliced_cc_mode.alt_max_pkt_size[dev-> sliced_cc_mode. alt]; break; default: break; } if (alt > 0 && max_pkt_size == 0) { cx231xx_errdev ("can't change interface %d alt no. to %d: Max. Pkt size = 0\n", usb_interface_index, alt); /*To workaround error number=-71 on EP0 for videograbber, need add following codes.*/ if (dev->board.no_alt_vanc) return -1; } cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u," "Interface = %d\n", alt, max_pkt_size, usb_interface_index); if (usb_interface_index > 0) { status = usb_set_interface(dev->udev, usb_interface_index, alt); if (status < 0) { cx231xx_errdev ("can't change interface %d alt no. 
to %d (err=%i)\n", usb_interface_index, alt, status); return status; } } return status; } EXPORT_SYMBOL_GPL(cx231xx_set_alt_setting); int cx231xx_gpio_set(struct cx231xx *dev, struct cx231xx_reg_seq *gpio) { int rc = 0; if (!gpio) return rc; /* Send GPIO reset sequences specified at board entry */ while (gpio->sleep >= 0) { rc = cx231xx_set_gpio_value(dev, gpio->bit, gpio->val); if (rc < 0) return rc; if (gpio->sleep > 0) msleep(gpio->sleep); gpio++; } return rc; } int cx231xx_demod_reset(struct cx231xx *dev) { u8 status = 0; u8 value[4] = { 0, 0, 0, 0 }; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); cx231xx_coredbg("Enter cx231xx_demod_reset()\n"); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x0; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); value[1] = (u8) 0x3; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(10); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_coredbg("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); return status; } EXPORT_SYMBOL_GPL(cx231xx_demod_reset); int is_fw_load(struct cx231xx *dev) { return cx231xx_check_fw(dev); } EXPORT_SYMBOL_GPL(is_fw_load); int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) { int errCode = 0; if (dev->mode == set_mode) return 0; if (set_mode == CX231XX_SUSPEND) { /* Set the chip in power saving mode */ dev->mode = set_mode; } /* Resource is locked */ if (dev->mode != CX231XX_SUSPEND) return -EINVAL; dev->mode = set_mode; if (dev->mode == CX231XX_DIGITAL_MODE)/* Set Digital power mode */ { /* set AGC mode to Digital */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case 
CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_HAUPPAUGE_EXETER: errCode = cx231xx_set_power_mode(dev, POLARIS_AVMODE_DIGITAL); break; default: break; } } else/* Set Analog Power mode */ { /* set AGC mode to Analog */ switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); break; default: break; } } return 0; } EXPORT_SYMBOL_GPL(cx231xx_set_mode); int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size) { int errCode = 0; int actlen, ret = -ENOMEM; u32 *buffer; buffer = kzalloc(4096, GFP_KERNEL); if (buffer == NULL) { cx231xx_info("out of mem\n"); return -ENOMEM; } memcpy(&buffer[0], firmware, 4096); ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5), buffer, 4096, &actlen, 2000); if (ret) cx231xx_info("bulk message failed: %d (%d/%d)", ret, size, actlen); else { errCode = actlen != size ? 
-1 : 0; } kfree(buffer); return 0; } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_isoc_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); int rc, i; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ cx231xx_isocdbg("urb completition error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock(&dev->video_mode.slock); rc = dev->video_mode.isoc_ctl.isoc_copy(dev, urb); spin_unlock(&dev->video_mode.slock); /* Reset urb buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = 0; urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /***************************************************************** * URB Streaming functions * ******************************************************************/ /* * IRQ callback, called by URB callback */ static void cx231xx_bulk_irq_callback(struct urb *urb) { struct cx231xx_dmaqueue *dma_q = urb->context; struct cx231xx_video_mode *vmode = container_of(dma_q, struct cx231xx_video_mode, vidq); struct cx231xx *dev = container_of(vmode, struct cx231xx, video_mode); int rc; switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ cx231xx_isocdbg("urb completition error %d.\n", urb->status); break; } /* Copy data from URB */ spin_lock(&dev->video_mode.slock); rc = 
dev->video_mode.bulk_ctl.bulk_copy(dev, urb); spin_unlock(&dev->video_mode.slock); /* Reset urb buffers */ urb->status = 0; urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) { cx231xx_isocdbg("urb resubmit failed (error=%i)\n", urb->status); } } /* * Stop and Deallocate URBs */ void cx231xx_uninit_isoc(struct cx231xx *dev) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; struct urb *urb; int i; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_isoc\n"); dev->video_mode.isoc_ctl.nfields = -1; for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = dev->video_mode.isoc_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.isoc_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.isoc_ctl. transfer_buffer[i], urb->transfer_dma); } usb_free_urb(urb); dev->video_mode.isoc_ctl.urb[i] = NULL; } dev->video_mode.isoc_ctl.transfer_buffer[i] = NULL; } kfree(dev->video_mode.isoc_ctl.urb); kfree(dev->video_mode.isoc_ctl.transfer_buffer); kfree(dma_q->p_left_data); dev->video_mode.isoc_ctl.urb = NULL; dev->video_mode.isoc_ctl.transfer_buffer = NULL; dev->video_mode.isoc_ctl.num_bufs = 0; dma_q->p_left_data = NULL; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_isoc); /* * Stop and Deallocate URBs */ void cx231xx_uninit_bulk(struct cx231xx *dev) { struct urb *urb; int i; cx231xx_isocdbg("cx231xx: called cx231xx_uninit_bulk\n"); dev->video_mode.bulk_ctl.nfields = -1; for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) { urb = dev->video_mode.bulk_ctl.urb[i]; if (urb) { if (!irqs_disabled()) usb_kill_urb(urb); else usb_unlink_urb(urb); if (dev->video_mode.bulk_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->video_mode.isoc_ctl. 
transfer_buffer[i], urb->transfer_dma); } usb_free_urb(urb); dev->video_mode.bulk_ctl.urb[i] = NULL; } dev->video_mode.bulk_ctl.transfer_buffer[i] = NULL; } kfree(dev->video_mode.bulk_ctl.urb); kfree(dev->video_mode.bulk_ctl.transfer_buffer); dev->video_mode.bulk_ctl.urb = NULL; dev->video_mode.bulk_ctl.transfer_buffer = NULL; dev->video_mode.bulk_ctl.num_bufs = 0; if (dev->mode_tv == 0) cx231xx_capture_start(dev, 0, Raw_Video); else cx231xx_capture_start(dev, 0, TS1_serial_mode); } EXPORT_SYMBOL_GPL(cx231xx_uninit_bulk); /* * Allocate URBs and start IRQ */ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, int num_bufs, int max_pkt_size, int (*isoc_copy) (struct cx231xx *dev, struct urb *urb)) { struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq; int i; int sb_size, pipe; struct urb *urb; int j, k; int rc; /* De-allocates all pending stuff */ cx231xx_uninit_isoc(dev); dma_q->p_left_data = kzalloc(4096, GFP_KERNEL); if (dma_q->p_left_data == NULL) { cx231xx_info("out of mem\n"); return -ENOMEM; } dev->video_mode.isoc_ctl.isoc_copy = isoc_copy; dev->video_mode.isoc_ctl.num_bufs = num_bufs; dma_q->pos = 0; dma_q->is_partial_line = 0; dma_q->last_sav = 0; dma_q->current_field = -1; dma_q->field1_done = 0; dma_q->lines_per_field = dev->height / 2; dma_q->bytes_left_in_line = dev->width << 1; dma_q->lines_completed = 0; dma_q->mpeg_buffer_done = 0; dma_q->left_data_count = 0; dma_q->mpeg_buffer_completed = 0; dma_q->add_ps_package_head = CX231XX_NEED_ADD_PS_PACKAGE_HEAD; dma_q->ps_head[0] = 0x00; dma_q->ps_head[1] = 0x00; dma_q->ps_head[2] = 0x01; dma_q->ps_head[3] = 0xBA; for (i = 0; i < 8; i++) dma_q->partial_buf[i] = 0; dev->video_mode.isoc_ctl.urb = kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); if (!dev->video_mode.isoc_ctl.urb) { cx231xx_errdev("cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->video_mode.isoc_ctl.transfer_buffer = kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL); if (!dev->video_mode.isoc_ctl.transfer_buffer) { 
cx231xx_errdev("cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.isoc_ctl.urb); return -ENOMEM; } dev->video_mode.isoc_ctl.max_pkt_size = max_pkt_size; dev->video_mode.isoc_ctl.buf = NULL; sb_size = max_packets * dev->video_mode.isoc_ctl.max_pkt_size; if (dev->mode_tv == 1) dev->video_mode.end_point_addr = 0x81; else dev->video_mode.end_point_addr = 0x84; /* allocate urbs and transfer buffers */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { cx231xx_err("cannot alloc isoc_ctl.urb %i\n", i); cx231xx_uninit_isoc(dev); return -ENOMEM; } dev->video_mode.isoc_ctl.urb[i] = urb; dev->video_mode.isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->video_mode.isoc_ctl.transfer_buffer[i]) { cx231xx_err("unable to allocate %i bytes for transfer" " buffer %i%s\n", sb_size, i, in_interrupt() ? " while in int" : ""); cx231xx_uninit_isoc(dev); return -ENOMEM; } memset(dev->video_mode.isoc_ctl.transfer_buffer[i], 0, sb_size); pipe = usb_rcvisocpipe(dev->udev, dev->video_mode.end_point_addr); usb_fill_int_urb(urb, dev->udev, pipe, dev->video_mode.isoc_ctl.transfer_buffer[i], sb_size, cx231xx_isoc_irq_callback, dma_q, 1); urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; k = 0; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = dev->video_mode.isoc_ctl.max_pkt_size; k += dev->video_mode.isoc_ctl.max_pkt_size; } } init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->video_mode.isoc_ctl.num_bufs; i++) { rc = usb_submit_urb(dev->video_mode.isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { cx231xx_err("submit of urb %i failed (error=%i)\n", i, rc); cx231xx_uninit_isoc(dev); return rc; } } if (dev->mode_tv == 0) cx231xx_capture_start(dev, 1, Raw_Video); else cx231xx_capture_start(dev, 1, TS1_serial_mode); return 0; } 
EXPORT_SYMBOL_GPL(cx231xx_init_isoc);

/*
 * cx231xx_init_bulk()
 * Allocate the bulk URBs and transfer buffers, submit them, and start
 * the capture engine.
 *
 * @dev:		device being set up
 * @max_packets:	number of max_pkt_size units per transfer buffer
 * @num_bufs:		number of URBs / transfer buffers to allocate
 * @max_pkt_size:	bulk packet size used to size each buffer
 * @bulk_copy:		per-URB completion copy handler, called from the
 *			URB callback under video_mode.slock
 *
 * Returns 0 on success or a negative errno; on any failure all
 * previously allocated URBs/buffers are released via
 * cx231xx_uninit_bulk().
 */
int cx231xx_init_bulk(struct cx231xx *dev, int max_packets,
		      int num_bufs, int max_pkt_size,
		      int (*bulk_copy) (struct cx231xx *dev, struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->video_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	/* clamp video input selector to the highest supported mux (2) */
	dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;

	cx231xx_coredbg("Setting Video mux to %d\n", dev->video_input);

	video_mux(dev, dev->video_input);

	/* De-allocates all pending stuff */
	cx231xx_uninit_bulk(dev);

	dev->video_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->video_mode.bulk_ctl.num_bufs = num_bufs;

	/* reset the DMA-queue line/field parser state */
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->field1_done = 0;
	dma_q->lines_per_field = dev->height / 2;
	dma_q->bytes_left_in_line = dev->width << 1;	/* 2 bytes/pixel */
	dma_q->lines_completed = 0;
	dma_q->mpeg_buffer_done = 0;
	dma_q->left_data_count = 0;
	dma_q->mpeg_buffer_completed = 0;
	/* MPEG PS pack-header start code 00 00 01 BA */
	dma_q->ps_head[0] = 0x00;
	dma_q->ps_head[1] = 0x00;
	dma_q->ps_head[2] = 0x01;
	dma_q->ps_head[3] = 0xBA;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->video_mode.bulk_ctl.urb =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->video_mode.bulk_ctl.urb) {
		cx231xx_errdev("cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->video_mode.bulk_ctl.transfer_buffer =
	    kzalloc(sizeof(void *) * num_bufs, GFP_KERNEL);
	if (!dev->video_mode.bulk_ctl.transfer_buffer) {
		cx231xx_errdev("cannot allocate memory for usbtransfer\n");
		kfree(dev->video_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->video_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->video_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->video_mode.bulk_ctl.max_pkt_size;

	/* TS mode streams from EP 0x81, raw video from EP 0x84 */
	if (dev->mode_tv == 1)
		dev->video_mode.end_point_addr = 0x81;
	else
		dev->video_mode.end_point_addr = 0x84;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_err("cannot alloc bulk_ctl.urb %i\n", i);
			cx231xx_uninit_bulk(dev);
			return -ENOMEM;
		}
		dev->video_mode.bulk_ctl.urb[i] = urb;
		/* buffers are DMA-mapped by usb_alloc_coherent() below */
		urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;

		dev->video_mode.bulk_ctl.transfer_buffer[i] =
		    usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL,
				       &urb->transfer_dma);
		if (!dev->video_mode.bulk_ctl.transfer_buffer[i]) {
			cx231xx_err("unable to allocate %i bytes for transfer"
				    " buffer %i%s\n",
				    sb_size, i,
				    in_interrupt() ? " while in int" : "");
			cx231xx_uninit_bulk(dev);
			return -ENOMEM;
		}
		memset(dev->video_mode.bulk_ctl.transfer_buffer[i], 0, sb_size);

		pipe = usb_rcvbulkpipe(dev->udev,
				       dev->video_mode.end_point_addr);

		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->video_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_bulk_irq_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enables IRQ */
	for (i = 0; i < dev->video_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->video_mode.bulk_ctl.urb[i],
				    GFP_ATOMIC);
		if (rc) {
			cx231xx_err("submit of urb %i failed (error=%i)\n",
				    i, rc);
			cx231xx_uninit_bulk(dev);
			return rc;
		}
	}

	if (dev->mode_tv == 0)
		cx231xx_capture_start(dev, 1, Raw_Video);
	else
		cx231xx_capture_start(dev, 1, TS1_serial_mode);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_bulk);

/*
 * cx231xx_stop_TS1()
 * Stop the TS1 port: writes TS_MODE_REG then TS1_CFG_REG with the
 * "stopped" register values.  Write errors are ignored (status is
 * assigned but not checked), matching the existing best-effort style.
 */
void cx231xx_stop_TS1(struct cx231xx *dev)
{
	int status = 0;
	u8 val[4] = { 0, 0, 0, 0 };

	val[0] = 0x00;
	val[1] = 0x03;
	val[2] = 0x00;
	val[3] = 0x00;
	status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
					TS_MODE_REG, val, 4);

	val[0] = 0x00;
	val[1] = 0x70;
	val[2] = 0x04;
	val[3] = 0x00;
	status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
					TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_stop_TS1); */

/*
 * cx231xx_start_TS1()
 * Start the TS1 port: writes TS_MODE_REG then TS1_CFG_REG with the
 * "running" register values.  Write errors are ignored, as above.
 */
void cx231xx_start_TS1(struct cx231xx *dev)
{
	int status = 0;
	u8 val[4] = { 0, 0, 0, 0 };

	val[0] = 0x03;
	val[1] = 0x03;
	val[2] = 0x00;
	val[3] = 0x00;
	status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
					TS_MODE_REG, val, 4);

	val[0] = 0x04;
	val[1] = 0xA3;
	val[2] = 0x3B;
	val[3] = 0x00;
	status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
					TS1_CFG_REG, val, 4);
}
/* EXPORT_SYMBOL_GPL(cx231xx_start_TS1); */

/*****************************************************************
 *             Device Init/UnInit functions                      *
 ******************************************************************/

/*
 * cx231xx_dev_init()
 * One-time hardware bring-up: registers the three I2C masters, sets
 * the power mode, resets an Xceive tuner if present, initializes the
 * AFE/DIF/I2S blocks and control pins, selects the AGC mux per board,
 * zeroes the alternate settings, and enables I2C port 3.
 *
 * Returns 0 on success or the first negative error code encountered.
 */
int cx231xx_dev_init(struct cx231xx *dev)
{
	int errCode = 0;

	/* Initialize I2C bus */

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].i2c_period = I2C_SPEED_100K;	/* 100 KHz */
	dev->i2c_bus[0].i2c_nostop = 0;
	dev->i2c_bus[0].i2c_reserve = 0;

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].i2c_period = I2C_SPEED_100K;	/* 100 KHz */
	dev->i2c_bus[1].i2c_nostop = 0;
	dev->i2c_bus[1].i2c_reserve = 0;

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].i2c_period = I2C_SPEED_100K;	/* 100kHz */
	dev->i2c_bus[2].i2c_nostop = 0;
	dev->i2c_bus[2].i2c_reserve = 0;

	/* register I2C buses */
	cx231xx_i2c_register(&dev->i2c_bus[0]);
	cx231xx_i2c_register(&dev->i2c_bus[1]);
	cx231xx_i2c_register(&dev->i2c_bus[2]);

	/* init hardware */
	/* Note : with out calling set power mode function,
	   afe can not be set up correctly */
	if (dev->board.external_av) {
		errCode = cx231xx_set_power_mode(dev,
				 POLARIS_AVMODE_ENXTERNAL_AV);
		if (errCode < 0) {
			cx231xx_errdev
			    ("%s: Failed to set Power - errCode [%d]!\n",
			     __func__, errCode);
			return errCode;
		}
	} else {
		errCode = cx231xx_set_power_mode(dev,
				 POLARIS_AVMODE_ANALOGT_TV);
		if (errCode < 0) {
			cx231xx_errdev
			    ("%s: Failed to set Power - errCode [%d]!\n",
			     __func__, errCode);
			return errCode;
		}
	}

	/* reset the Tuner, if it is a Xceive tuner */
	if ((dev->board.tuner_type == TUNER_XC5000) ||
	    (dev->board.tuner_type == TUNER_XC2028))
		cx231xx_gpio_set(dev, dev->board.tuner_gpio);

	/* initialize Colibri block */
	errCode = cx231xx_afe_init_super_block(dev, 0x23c);
	if (errCode < 0) {
		cx231xx_errdev
		    ("%s: cx231xx_afe init super block - errCode [%d]!\n",
		     __func__, errCode);
		return errCode;
	}
	errCode = cx231xx_afe_init_channels(dev);
	if (errCode < 0) {
		cx231xx_errdev
		    ("%s: cx231xx_afe init channels - errCode [%d]!\n",
		     __func__, errCode);
		return errCode;
	}

	/* Set DIF in By pass mode */
	errCode = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND);
	if (errCode < 0) {
		cx231xx_errdev
		    ("%s: cx231xx_dif set to By pass mode - errCode [%d]!\n",
		     __func__, errCode);
		return errCode;
	}

	/* I2S block related functions */
	errCode = cx231xx_i2s_blk_initialize(dev);
	if (errCode < 0) {
		cx231xx_errdev
		    ("%s: cx231xx_i2s block initialize - errCode [%d]!\n",
		     __func__, errCode);
		return errCode;
	}

	/* init control pins */
	errCode = cx231xx_init_ctrl_pin_status(dev);
	if (errCode < 0) {
		cx231xx_errdev("%s: cx231xx_init ctrl pins - errCode [%d]!\n",
			       __func__, errCode);
		return errCode;
	}

	/* set AGC mode to Analog; mux select value is board-specific */
	switch (dev->model) {
	case CX231XX_BOARD_CNXT_CARRAERA:
	case CX231XX_BOARD_CNXT_RDE_250:
	case CX231XX_BOARD_CNXT_SHELBY:
	case CX231XX_BOARD_CNXT_RDU_250:
		errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
		break;
	case CX231XX_BOARD_CNXT_RDE_253S:
	case CX231XX_BOARD_CNXT_RDU_253S:
	case CX231XX_BOARD_HAUPPAUGE_EXETER:
	case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
	case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
	case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
		errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
		break;
	default:
		break;
	}
	if (errCode < 0) {
		cx231xx_errdev
		    ("%s: cx231xx_AGC mode to Analog - errCode [%d]!\n",
		     __func__, errCode);
		return errCode;
	}

	/* set all alternate settings to zero initially */
	cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
	cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
	cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
	if (dev->board.has_dvb)
		cx231xx_set_alt_setting(dev, INDEX_TS1, 0);

	/* set the I2C master port to 3 on channel 1 */
	errCode = cx231xx_enable_i2c_port_3(dev, true);

	return errCode;
}
EXPORT_SYMBOL_GPL(cx231xx_dev_init);

/*
 * cx231xx_dev_uninit()
 * Tear-down counterpart of cx231xx_dev_init(): unregisters the three
 * I2C masters in reverse registration order.
 */
void cx231xx_dev_uninit(struct cx231xx *dev)
{
	/* Un Initialize I2C bus */
	cx231xx_i2c_unregister(&dev->i2c_bus[2]);
	cx231xx_i2c_unregister(&dev->i2c_bus[1]);
	cx231xx_i2c_unregister(&dev->i2c_bus[0]);
}
EXPORT_SYMBOL_GPL(cx231xx_dev_uninit);

/*****************************************************************
 *              G P I O related functions                        *
 ******************************************************************/

/*
 * cx231xx_send_gpio_cmd()
 * Build a vendor request for a GPIO/GPIE read or write and send it.
 *
 * @gpio_bit:	high 16 bits go into wValue, low 16 bits into wIndex
 * @gpio_val:	data buffer of @len bytes (zeroed first for reads)
 * @request:	0 selects the GPIO requests, non-zero the GPIE requests
 * @direction:	non-zero = read (USB_DIR_IN), zero = write (USB_DIR_OUT)
 *
 * Returns the status from cx231xx_send_vendor_cmd() (negative on error).
 */
int cx231xx_send_gpio_cmd(struct cx231xx *dev, u32 gpio_bit, u8 *gpio_val,
			  u8 len, u8 request, u8 direction)
{
	int status = 0;
	struct VENDOR_REQUEST_IN ven_req;

	/* Set wValue */
	ven_req.wValue = (u16) (gpio_bit >> 16 & 0xffff);

	/* set request */
	if (!request) {
		if (direction)
			ven_req.bRequest = VRT_GET_GPIO;	/* 0x8 gpio */
		else
			ven_req.bRequest = VRT_SET_GPIO;	/* 0x9 gpio */
	} else {
		if (direction)
			ven_req.bRequest = VRT_GET_GPIE;	/* 0xa gpie */
		else
			ven_req.bRequest = VRT_SET_GPIE;	/* 0xb gpie */
	}

	/* set index value */
	ven_req.wIndex = (u16) (gpio_bit & 0xffff);

	/* set wLength value */
	ven_req.wLength = len;

	/* set bData value */
	ven_req.bData = 0;

	/* set the buffer for read / write */
	ven_req.pBuff = gpio_val;

	/* set the direction */
	if (direction) {
		ven_req.direction = USB_DIR_IN;
		memset(ven_req.pBuff, 0x00, ven_req.wLength);
	} else
		ven_req.direction = USB_DIR_OUT;

	/* call common vendor command request */
	status = cx231xx_send_vendor_cmd(dev, &ven_req);
	if (status < 0) {
		cx231xx_info
		    ("UsbInterface::sendCommand, failed with status -%d\n",
		     status);
	}

	return status;
}
EXPORT_SYMBOL_GPL(cx231xx_send_gpio_cmd);

/*****************************************************************
 *    C O N T R O L - Register R E A D / W R I T E functions     *
 *****************************************************************/

/*
 * cx231xx_mode_register()
 * Read-modify-write of a 32-bit control register: OR @mode into the
 * current value of @address.  Returns negative status on failure.
 *
 * NOTE(review): the little-endian reassembly of value[] is explicit,
 * but `tmp = *((u32 *) value)` reads the buffer in host byte order —
 * presumably this driver only runs little-endian; confirm before
 * reusing on big-endian hardware.
 */
int cx231xx_mode_register(struct cx231xx *dev, u16 address, u32 mode)
{
	u8 value[4] = { 0x0, 0x0, 0x0, 0x0 };
	u32 tmp = 0;
	int status = 0;

	status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
				       address, value, 4);
	if (status < 0)
		return status;

	tmp = *((u32 *) value);
	tmp |= mode;

	value[0] = (u8) tmp;
	value[1] = (u8) (tmp >> 8);
	value[2] = (u8) (tmp >> 16);
	value[3] = (u8) (tmp >> 24);

	status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
					address, value, 4);

	return status;
}

/*****************************************************************
 *            I 2 C Internal C O N T R O L functions             *
 *****************************************************************/

/*
 * cx231xx_read_i2c_master()
 * Read up to @data_len bytes from I2C device @dev_addr (8-bit address,
 * shifted down to 7-bit) at sub-address @saddr, on I2C master
 * @master (0, 1 or 2).  The result is packed little-endian into *data
 * for data_len 1 or 4.
 *
 * NOTE(review): for data_len > 4 the code returns value[saddr] — using
 * the sub-address as a buffer index looks suspect; confirm intent
 * against callers before relying on that path.
 */
int cx231xx_read_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
			    u8 saddr_len, u32 *data, u8 data_len, int master)
{
	int status = 0;
	struct cx231xx_i2c_xfer_data req_data;
	u8 value[64] = "0";	/* value[0]='0', rest zero-initialized */

	/* normalize sub-address to its declared width */
	if (saddr_len == 0)
		saddr = 0;
	else if (saddr_len == 1)
		saddr &= 0xff;

	/* prepare xfer_data struct */
	req_data.dev_addr = dev_addr >> 1;
	req_data.direction = I2C_M_RD;
	req_data.saddr_len = saddr_len;
	req_data.saddr_dat = saddr;
	req_data.buf_size = data_len;
	req_data.p_buffer = (u8 *) value;

	/* usb send command */
	if (master == 0)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
						       &req_data);
	else if (master == 1)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
						       &req_data);
	else if (master == 2)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
						       &req_data);

	if (status >= 0) {
		/* Copy the data read back to main buffer */
		if (data_len == 1)
			*data = value[0];
		else if (data_len == 4)
			*data =
			    value[0] | value[1] << 8 |
			    value[2] << 16 | value[3] << 24;
		else if (data_len > 4)
			*data = value[saddr];
	}

	return status;
}

/*
 * cx231xx_write_i2c_master()
 * Write @data_len bytes of @data (little-endian) to I2C device
 * @dev_addr at sub-address @saddr, on I2C master @master (0, 1 or 2).
 * Returns the USB command status.
 */
int cx231xx_write_i2c_master(struct cx231xx *dev, u8 dev_addr, u16 saddr,
			     u8 saddr_len, u32 data, u8 data_len, int master)
{
	int status = 0;
	u8 value[4] = { 0, 0, 0, 0 };
	struct cx231xx_i2c_xfer_data req_data;

	/* serialize data little-endian into the transfer buffer */
	value[0] = (u8) data;
	value[1] = (u8) (data >> 8);
	value[2] = (u8) (data >> 16);
	value[3] = (u8) (data >> 24);

	if (saddr_len == 0)
		saddr = 0;
	else if (saddr_len == 1)
		saddr &= 0xff;

	/* prepare xfer_data struct */
	req_data.dev_addr = dev_addr >> 1;
	req_data.direction = 0;
	req_data.saddr_len = saddr_len;
	req_data.saddr_dat = saddr;
	req_data.buf_size = data_len;
	req_data.p_buffer = value;

	/* usb send command */
	if (master == 0)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0],
						       &req_data);
	else if (master == 1)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[1],
						       &req_data);
	else if (master == 2)
		status = dev->cx231xx_send_usb_command(&dev->i2c_bus[2],
						       &req_data);

	return status;
}

/*
 * cx231xx_read_i2c_data()
 * Like cx231xx_read_i2c_master() but fixed to I2C bus 0 and limited to
 * a 4-byte buffer; data_len != 1 is reassembled as a 32-bit
 * little-endian value.
 */
int cx231xx_read_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr,
			  u8 saddr_len, u32 *data, u8 data_len)
{
	int status = 0;
	struct cx231xx_i2c_xfer_data req_data;
	u8 value[4] = { 0, 0, 0, 0 };

	if (saddr_len == 0)
		saddr = 0;
	else if (saddr_len == 1)
		saddr &= 0xff;

	/* prepare xfer_data struct */
	req_data.dev_addr = dev_addr >> 1;
	req_data.direction = I2C_M_RD;
	req_data.saddr_len = saddr_len;
	req_data.saddr_dat = saddr;
	req_data.buf_size = data_len;
	req_data.p_buffer = (u8 *) value;

	/* usb send command */
	status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data);

	if (status >= 0) {
		/* Copy the data read back to main buffer */
		if (data_len == 1)
			*data = value[0];
		else
			*data =
			    value[0] | value[1] << 8 |
			    value[2] << 16 | value[3] << 24;
	}

	return status;
}

/*
 * cx231xx_write_i2c_data()
 * Like cx231xx_write_i2c_master() but fixed to I2C bus 0.
 */
int cx231xx_write_i2c_data(struct cx231xx *dev, u8 dev_addr, u16 saddr,
			   u8 saddr_len, u32 data, u8 data_len)
{
	int status = 0;
	u8 value[4] = { 0, 0, 0, 0 };
	struct cx231xx_i2c_xfer_data req_data;

	value[0] = (u8) data;
	value[1] = (u8) (data >> 8);
	value[2] = (u8) (data >> 16);
	value[3] = (u8) (data >> 24);

	if (saddr_len == 0)
		saddr = 0;
	else if (saddr_len == 1)
		saddr &= 0xff;

	/* prepare xfer_data struct */
	req_data.dev_addr = dev_addr >> 1;
	req_data.direction = 0;
	req_data.saddr_len = saddr_len;
	req_data.saddr_dat = saddr;
	req_data.buf_size = data_len;
	req_data.p_buffer = value;

	/* usb send command */
	status = dev->cx231xx_send_usb_command(&dev->i2c_bus[0], &req_data);

	return status;
}

/*
 * cx231xx_reg_mask_write()
 * Read-modify-write of the bit-field [bit_start, bit_end] (inclusive)
 * in an 8- or 32-bit I2C register (@size is the register width in
 * bits).  @value is shifted into position; bits outside the field are
 * preserved.  The sub-address is always sent as 2 bytes.
 *
 * Returns -1 if the bit positions exceed the register width, otherwise
 * the read/write status.
 */
int cx231xx_reg_mask_write(struct cx231xx *dev, u8 dev_addr, u8 size,
			   u16 register_address, u8 bit_start, u8 bit_end,
			   u32 value)
{
	int status = 0;
	u32 tmp;
	u32 mask = 0;
	int i;

	if (bit_start > (size - 1) || bit_end > (size - 1))
		return -1;

	if (size == 8) {
		status =
		    cx231xx_read_i2c_data(dev, dev_addr,
					  register_address, 2, &tmp, 1);
	} else {
		status =
		    cx231xx_read_i2c_data(dev, dev_addr,
					  register_address, 2, &tmp, 4);
	}

	if (status < 0)
		return status;

	/* build a contiguous mask covering bits bit_start..bit_end */
	mask = 1 << bit_end;
	for (i = bit_end; i > bit_start && i > 0; i--)
		mask = mask + (1 << (i - 1));

	value <<= bit_start;

	if (size == 8) {
		tmp &= ~mask;
		tmp |= value;
		tmp &= 0xff;	/* keep only the 8-bit register width */

		status =
		    cx231xx_write_i2c_data(dev, dev_addr,
					   register_address, 2, tmp, 1);
	} else {
		tmp &= ~mask;
		tmp |= value;

		status =
		    cx231xx_write_i2c_data(dev, dev_addr,
					   register_address, 2, tmp, 4);
	}

	return status;
}

/*
 * cx231xx_read_modify_write_i2c_dword()
 * Clear @mask in the 32-bit register at @saddr and OR in @value.
 * Returns negative status if either the read or the write fails.
 */
int cx231xx_read_modify_write_i2c_dword(struct cx231xx *dev, u8 dev_addr,
					u16 saddr, u32 mask, u32 value)
{
	u32 temp;
	int status = 0;

	status = cx231xx_read_i2c_data(dev, dev_addr, saddr, 2, &temp, 4);

	if (status < 0)
		return status;

	temp &= ~mask;
	temp |= value;

	status = cx231xx_write_i2c_data(dev, dev_addr, saddr, 2, temp, 4);

	return status;
}

/*
 * cx231xx_set_field()
 * Shift @data left by the number of trailing zero bits in @field_mask,
 * i.e. align a field value with its mask position.
 *
 * NOTE(review): a field_mask of 0 would loop until temp is all zeros
 * and never terminate the (temp & 1) test — callers must pass a
 * non-zero mask.
 */
u32 cx231xx_set_field(u32 field_mask, u32 data)
{
	u32 temp;

	for (temp = field_mask; (temp & 1) == 0; temp >>= 1)
		data <<= 1;

	return data;
}
gpl-2.0
hominlinx/linux-bananapi
sound/soc/samsung/s3c-i2s-v2.c
5411
18057
/* sound/soc/samsung/s3c-i2c-v2.c * * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs. * * Copyright (c) 2006 Wolfson Microelectronics PLC. * Graeme Gregory graeme.gregory@wolfsonmicro.com * linux@wolfsonmicro.com * * Copyright (c) 2008, 2007, 2004-2005 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <mach/dma.h> #include "regs-i2s-v2.h" #include "s3c-i2s-v2.h" #include "dma.h" #undef S3C_IIS_V2_SUPPORTED #if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \ || defined(CONFIG_CPU_S5PV210) #define S3C_IIS_V2_SUPPORTED #endif #ifdef CONFIG_PLAT_S3C64XX #define S3C_IIS_V2_SUPPORTED #endif #ifndef S3C_IIS_V2_SUPPORTED #error Unsupported CPU model #endif #define S3C2412_I2S_DEBUG_CON 0 static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai) { return snd_soc_dai_get_drvdata(cpu_dai); } #define bit_set(v, b) (((v) & (b)) ? 
1 : 0) #if S3C2412_I2S_DEBUG_CON static void dbg_showcon(const char *fn, u32 con) { printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn, bit_set(con, S3C2412_IISCON_LRINDEX), bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_TXFIFO_FULL), bit_set(con, S3C2412_IISCON_RXFIFO_FULL)); printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_PAUSE), bit_set(con, S3C2412_IISCON_RXDMA_PAUSE), bit_set(con, S3C2412_IISCON_TXCH_PAUSE), bit_set(con, S3C2412_IISCON_RXCH_PAUSE)); printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_IIS_ACTIVE)); } #else static inline void dbg_showcon(const char *fn, u32 con) { } #endif /* Turn on or off the transmission path. */ static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on) { void __iomem *regs = i2s->regs; u32 fic, con, mod; pr_debug("%s(%d)\n", __func__, on); fic = readl(regs + S3C2412_IISFIC); con = readl(regs + S3C2412_IISCON); mod = readl(regs + S3C2412_IISMOD); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); if (on) { con |= S3C2412_IISCON_TXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE; con &= ~S3C2412_IISCON_TXDMA_PAUSE; con &= ~S3C2412_IISCON_TXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXONLY: case S3C2412_IISMOD_MODE_TXRX: /* do nothing, we are in the right mode */ break; case S3C2412_IISMOD_MODE_RXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXRX; break; default: dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); break; } writel(con, regs + S3C2412_IISCON); writel(mod, regs + S3C2412_IISMOD); } else { /* Note, we do not have any indication that the FIFO problems * tha the S3C2410/2440 had apply here, so we should be able * to disable 
the DMA and TX without resetting the FIFOS. */ con |= S3C2412_IISCON_TXDMA_PAUSE; con |= S3C2412_IISCON_TXCH_PAUSE; con &= ~S3C2412_IISCON_TXDMA_ACTIVE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXRX: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_RXONLY; break; case S3C2412_IISMOD_MODE_TXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; con &= ~S3C2412_IISCON_IIS_ACTIVE; break; default: dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); break; } writel(mod, regs + S3C2412_IISMOD); writel(con, regs + S3C2412_IISCON); } fic = readl(regs + S3C2412_IISFIC); dbg_showcon(__func__, con); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); } static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on) { void __iomem *regs = i2s->regs; u32 fic, con, mod; pr_debug("%s(%d)\n", __func__, on); fic = readl(regs + S3C2412_IISFIC); con = readl(regs + S3C2412_IISCON); mod = readl(regs + S3C2412_IISMOD); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); if (on) { con |= S3C2412_IISCON_RXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE; con &= ~S3C2412_IISCON_RXDMA_PAUSE; con &= ~S3C2412_IISCON_RXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXRX: case S3C2412_IISMOD_MODE_RXONLY: /* do nothing, we are in the right mode */ break; case S3C2412_IISMOD_MODE_TXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXRX; break; default: dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); } writel(mod, regs + S3C2412_IISMOD); writel(con, regs + S3C2412_IISCON); } else { /* See txctrl notes on FIFOs. 
*/ con &= ~S3C2412_IISCON_RXDMA_ACTIVE; con |= S3C2412_IISCON_RXDMA_PAUSE; con |= S3C2412_IISCON_RXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_RXONLY: con &= ~S3C2412_IISCON_IIS_ACTIVE; mod &= ~S3C2412_IISMOD_MODE_MASK; break; case S3C2412_IISMOD_MODE_TXRX: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXONLY; break; default: dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); } writel(con, regs + S3C2412_IISCON); writel(mod, regs + S3C2412_IISMOD); } fic = readl(regs + S3C2412_IISFIC); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); } #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) /* * Wait for the LR signal to allow synchronisation to the L/R clock * from the codec. May only be needed for slave mode. */ static int s3c2412_snd_lrsync(struct s3c_i2sv2_info *i2s) { u32 iiscon; unsigned long loops = msecs_to_loops(5); pr_debug("Entered %s\n", __func__); while (--loops) { iiscon = readl(i2s->regs + S3C2412_IISCON); if (iiscon & S3C2412_IISCON_LRINDEX) break; cpu_relax(); } if (!loops) { printk(KERN_ERR "%s: timeout\n", __func__); return -ETIMEDOUT; } return 0; } /* * Set S3C2412 I2S DAI format */ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod; pr_debug("Entered %s\n", __func__); iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("hw_params r: IISMOD: %x \n", iismod); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: i2s->master = 0; iismod |= S3C2412_IISMOD_SLAVE; break; case SND_SOC_DAIFMT_CBS_CFS: i2s->master = 1; iismod &= ~S3C2412_IISMOD_SLAVE; break; default: pr_err("unknwon master/slave format\n"); return -EINVAL; } iismod &= ~S3C2412_IISMOD_SDF_MASK; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_RIGHT_J: iismod |= S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_MSB; break; case SND_SOC_DAIFMT_LEFT_J: iismod 
|= S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_LSB; break; case SND_SOC_DAIFMT_I2S: iismod &= ~S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_IIS; break; default: pr_err("Unknown data format\n"); return -EINVAL; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("hw_params w: IISMOD: %x \n", iismod); return 0; } static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); struct s3c_dma_params *dma_data; u32 iismod; pr_debug("Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_data = i2s->dma_playback; else dma_data = i2s->dma_capture; snd_soc_dai_set_dma_data(dai, substream, dma_data); /* Working copies of register */ iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("%s: r: IISMOD: %x\n", __func__, iismod); iismod &= ~S3C64XX_IISMOD_BLC_MASK; /* Sample size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: iismod |= S3C64XX_IISMOD_BLC_8BIT; break; case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S24_LE: iismod |= S3C64XX_IISMOD_BLC_24BIT; break; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("%s: w: IISMOD: %x\n", __func__, iismod); return 0; } static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("Entered %s\n", __func__); pr_debug("%s r: IISMOD: %x\n", __func__, iismod); switch (clk_id) { case S3C_I2SV2_CLKSRC_PCLK: iismod &= ~S3C2412_IISMOD_IMS_SYSMUX; break; case S3C_I2SV2_CLKSRC_AUDIOBUS: iismod |= S3C2412_IISMOD_IMS_SYSMUX; break; case S3C_I2SV2_CLKSRC_CDCLK: /* Error if controller doesn't have the CDCLKCON bit */ if (!(i2s->feature & S3C_FEATURE_CDCLKCON)) return -EINVAL; switch (dir) { case SND_SOC_CLOCK_IN: iismod |= S3C64XX_IISMOD_CDCLKCON; break; case SND_SOC_CLOCK_OUT: iismod &= ~S3C64XX_IISMOD_CDCLKCON; break; 
default: return -EINVAL; } break; default: return -EINVAL; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("%s w: IISMOD: %x\n", __func__, iismod); return 0; } static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai); int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); unsigned long irqs; int ret = 0; struct s3c_dma_params *dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); pr_debug("Entered %s\n", __func__); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* On start, ensure that the FIFOs are cleared and reset. */ writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH, i2s->regs + S3C2412_IISFIC); /* clear again, just in case */ writel(0x0, i2s->regs + S3C2412_IISFIC); case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (!i2s->master) { ret = s3c2412_snd_lrsync(i2s); if (ret) goto exit_err; } local_irq_save(irqs); if (capture) s3c2412_snd_rxctrl(i2s, 1); else s3c2412_snd_txctrl(i2s, 1); local_irq_restore(irqs); /* * Load the next buffer to DMA to meet the reqirement * of the auto reload mechanism of S3C24XX. * This call won't bother S3C64XX. 
*/ s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: local_irq_save(irqs); if (capture) s3c2412_snd_rxctrl(i2s, 0); else s3c2412_snd_txctrl(i2s, 0); local_irq_restore(irqs); break; default: ret = -EINVAL; break; } exit_err: return ret; } /* * Set S3C2412 Clock dividers */ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 reg; pr_debug("%s(%p, %d, %d)\n", __func__, cpu_dai, div_id, div); switch (div_id) { case S3C_I2SV2_DIV_BCLK: switch (div) { case 16: div = S3C2412_IISMOD_BCLK_16FS; break; case 32: div = S3C2412_IISMOD_BCLK_32FS; break; case 24: div = S3C2412_IISMOD_BCLK_24FS; break; case 48: div = S3C2412_IISMOD_BCLK_48FS; break; default: return -EINVAL; } reg = readl(i2s->regs + S3C2412_IISMOD); reg &= ~S3C2412_IISMOD_BCLK_MASK; writel(reg | div, i2s->regs + S3C2412_IISMOD); pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD)); break; case S3C_I2SV2_DIV_RCLK: switch (div) { case 256: div = S3C2412_IISMOD_RCLK_256FS; break; case 384: div = S3C2412_IISMOD_RCLK_384FS; break; case 512: div = S3C2412_IISMOD_RCLK_512FS; break; case 768: div = S3C2412_IISMOD_RCLK_768FS; break; default: return -EINVAL; } reg = readl(i2s->regs + S3C2412_IISMOD); reg &= ~S3C2412_IISMOD_RCLK_MASK; writel(reg | div, i2s->regs + S3C2412_IISMOD); pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD)); break; case S3C_I2SV2_DIV_PRESCALER: if (div >= 0) { writel((div << 8) | S3C2412_IISPSR_PSREN, i2s->regs + S3C2412_IISPSR); } else { writel(0x0, i2s->regs + S3C2412_IISPSR); } pr_debug("%s: PSR=%08x\n", __func__, readl(i2s->regs + S3C2412_IISPSR)); break; default: return -EINVAL; } return 0; } static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); u32 reg = 
readl(i2s->regs + S3C2412_IISFIC); snd_pcm_sframes_t delay; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) delay = S3C2412_IISFIC_TXCOUNT(reg); else delay = S3C2412_IISFIC_RXCOUNT(reg); return delay; } struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod = readl(i2s->regs + S3C2412_IISMOD); if (iismod & S3C2412_IISMOD_IMS_SYSMUX) return i2s->iis_cclk; else return i2s->iis_pclk; } EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock); /* default table of all avaialable root fs divisors */ static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 }; int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info, unsigned int *fstab, unsigned int rate, struct clk *clk) { unsigned long clkrate = clk_get_rate(clk); unsigned int div; unsigned int fsclk; unsigned int actual; unsigned int fs; unsigned int fsdiv; signed int deviation = 0; unsigned int best_fs = 0; unsigned int best_div = 0; unsigned int best_rate = 0; unsigned int best_deviation = INT_MAX; pr_debug("Input clock rate %ldHz\n", clkrate); if (fstab == NULL) fstab = iis_fs_tab; for (fs = 0; fs < ARRAY_SIZE(iis_fs_tab); fs++) { fsdiv = iis_fs_tab[fs]; fsclk = clkrate / fsdiv; div = fsclk / rate; if ((fsclk % rate) > (rate / 2)) div++; if (div <= 1) continue; actual = clkrate / (fsdiv * div); deviation = actual - rate; printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n", fsdiv, div, actual, deviation); deviation = abs(deviation); if (deviation < best_deviation) { best_fs = fsdiv; best_div = div; best_rate = actual; best_deviation = deviation; } if (deviation == 0) break; } printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n", best_fs, best_div, best_rate); info->fs_div = best_fs; info->clk_div = best_div; return 0; } EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate); int s3c_i2sv2_probe(struct snd_soc_dai *dai, struct s3c_i2sv2_info *i2s, unsigned long base) { struct device *dev = dai->dev; unsigned int iismod; i2s->dev = dev; /* record our i2s structure for 
later use in the callbacks */ snd_soc_dai_set_drvdata(dai, i2s); i2s->regs = ioremap(base, 0x100); if (i2s->regs == NULL) { dev_err(dev, "cannot ioremap registers\n"); return -ENXIO; } i2s->iis_pclk = clk_get(dev, "iis"); if (IS_ERR(i2s->iis_pclk)) { dev_err(dev, "failed to get iis_clock\n"); iounmap(i2s->regs); return -ENOENT; } clk_enable(i2s->iis_pclk); /* Mark ourselves as in TXRX mode so we can run through our cleanup * process without warnings. */ iismod = readl(i2s->regs + S3C2412_IISMOD); iismod |= S3C2412_IISMOD_MODE_TXRX; writel(iismod, i2s->regs + S3C2412_IISMOD); s3c2412_snd_txctrl(i2s, 0); s3c2412_snd_rxctrl(i2s, 0); return 0; } EXPORT_SYMBOL_GPL(s3c_i2sv2_probe); #ifdef CONFIG_PM static int s3c2412_i2s_suspend(struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); u32 iismod; if (dai->active) { i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD); i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON); i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR); /* some basic suspend checks */ iismod = readl(i2s->regs + S3C2412_IISMOD); if (iismod & S3C2412_IISCON_RXDMA_ACTIVE) pr_warning("%s: RXDMA active?\n", __func__); if (iismod & S3C2412_IISCON_TXDMA_ACTIVE) pr_warning("%s: TXDMA active?\n", __func__); if (iismod & S3C2412_IISCON_IIS_ACTIVE) pr_warning("%s: IIS active\n", __func__); } return 0; } static int s3c2412_i2s_resume(struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n", dai->active, i2s->suspend_iismod, i2s->suspend_iiscon); if (dai->active) { writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON); writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD); writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR); writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH, i2s->regs + S3C2412_IISFIC); ndelay(250); writel(0x0, i2s->regs + S3C2412_IISFIC); } return 0; } #else #define s3c2412_i2s_suspend NULL #define s3c2412_i2s_resume NULL #endif int 
/*
 * s3c_i2sv2_register_dai - install the common I2Sv2 DAI callbacks into
 * @drv and register the DAI with the ASoC core.
 *
 * hw_params and delay may be pre-set by the caller (e.g. IISv4) and are
 * only filled in when absent; trigger/set_fmt/set_clkdiv/set_sysclk and
 * the suspend/resume hooks are always overwritten.
 *
 * NOTE(review): @id is currently unused -- confirm whether callers rely
 * on it being ignored.
 *
 * Returns the result of snd_soc_register_dai().
 */
s3c_i2sv2_register_dai(struct device *dev, int id,
		       struct snd_soc_dai_driver *drv)
{
	struct snd_soc_dai_ops *ops = drv->ops;

	ops->trigger = s3c2412_i2s_trigger;
	if (!ops->hw_params)
		ops->hw_params = s3c_i2sv2_hw_params;
	ops->set_fmt = s3c2412_i2s_set_fmt;
	ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
	ops->set_sysclk = s3c_i2sv2_set_sysclk;

	/* Allow overriding by (for example) IISv4 */
	if (!ops->delay)
		ops->delay = s3c2412_i2s_delay;

	drv->suspend = s3c2412_i2s_suspend;
	drv->resume = s3c2412_i2s_resume;

	return snd_soc_register_dai(dev, drv);
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_register_dai);

MODULE_LICENSE("GPL");
gpl-2.0
lostemp/lsk-3.4-android-12.09
sound/soc/samsung/s3c-i2s-v2.c
5411
18057
/* sound/soc/samsung/s3c-i2c-v2.c * * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs. * * Copyright (c) 2006 Wolfson Microelectronics PLC. * Graeme Gregory graeme.gregory@wolfsonmicro.com * linux@wolfsonmicro.com * * Copyright (c) 2008, 2007, 2004-2005 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <sound/soc.h> #include <sound/pcm_params.h> #include <mach/dma.h> #include "regs-i2s-v2.h" #include "s3c-i2s-v2.h" #include "dma.h" #undef S3C_IIS_V2_SUPPORTED #if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \ || defined(CONFIG_CPU_S5PV210) #define S3C_IIS_V2_SUPPORTED #endif #ifdef CONFIG_PLAT_S3C64XX #define S3C_IIS_V2_SUPPORTED #endif #ifndef S3C_IIS_V2_SUPPORTED #error Unsupported CPU model #endif #define S3C2412_I2S_DEBUG_CON 0 static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai) { return snd_soc_dai_get_drvdata(cpu_dai); } #define bit_set(v, b) (((v) & (b)) ? 
1 : 0) #if S3C2412_I2S_DEBUG_CON static void dbg_showcon(const char *fn, u32 con) { printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn, bit_set(con, S3C2412_IISCON_LRINDEX), bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY), bit_set(con, S3C2412_IISCON_TXFIFO_FULL), bit_set(con, S3C2412_IISCON_RXFIFO_FULL)); printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_PAUSE), bit_set(con, S3C2412_IISCON_RXDMA_PAUSE), bit_set(con, S3C2412_IISCON_TXCH_PAUSE), bit_set(con, S3C2412_IISCON_RXCH_PAUSE)); printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn, bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE), bit_set(con, S3C2412_IISCON_IIS_ACTIVE)); } #else static inline void dbg_showcon(const char *fn, u32 con) { } #endif /* Turn on or off the transmission path. */ static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on) { void __iomem *regs = i2s->regs; u32 fic, con, mod; pr_debug("%s(%d)\n", __func__, on); fic = readl(regs + S3C2412_IISFIC); con = readl(regs + S3C2412_IISCON); mod = readl(regs + S3C2412_IISMOD); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); if (on) { con |= S3C2412_IISCON_TXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE; con &= ~S3C2412_IISCON_TXDMA_PAUSE; con &= ~S3C2412_IISCON_TXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXONLY: case S3C2412_IISMOD_MODE_TXRX: /* do nothing, we are in the right mode */ break; case S3C2412_IISMOD_MODE_RXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXRX; break; default: dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); break; } writel(con, regs + S3C2412_IISCON); writel(mod, regs + S3C2412_IISMOD); } else { /* Note, we do not have any indication that the FIFO problems * tha the S3C2410/2440 had apply here, so we should be able * to disable 
the DMA and TX without resetting the FIFOS. */ con |= S3C2412_IISCON_TXDMA_PAUSE; con |= S3C2412_IISCON_TXCH_PAUSE; con &= ~S3C2412_IISCON_TXDMA_ACTIVE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXRX: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_RXONLY; break; case S3C2412_IISMOD_MODE_TXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; con &= ~S3C2412_IISCON_IIS_ACTIVE; break; default: dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); break; } writel(mod, regs + S3C2412_IISMOD); writel(con, regs + S3C2412_IISCON); } fic = readl(regs + S3C2412_IISFIC); dbg_showcon(__func__, con); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); } static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on) { void __iomem *regs = i2s->regs; u32 fic, con, mod; pr_debug("%s(%d)\n", __func__, on); fic = readl(regs + S3C2412_IISFIC); con = readl(regs + S3C2412_IISCON); mod = readl(regs + S3C2412_IISMOD); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); if (on) { con |= S3C2412_IISCON_RXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE; con &= ~S3C2412_IISCON_RXDMA_PAUSE; con &= ~S3C2412_IISCON_RXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_TXRX: case S3C2412_IISMOD_MODE_RXONLY: /* do nothing, we are in the right mode */ break; case S3C2412_IISMOD_MODE_TXONLY: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXRX; break; default: dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); } writel(mod, regs + S3C2412_IISMOD); writel(con, regs + S3C2412_IISCON); } else { /* See txctrl notes on FIFOs. 
*/ con &= ~S3C2412_IISCON_RXDMA_ACTIVE; con |= S3C2412_IISCON_RXDMA_PAUSE; con |= S3C2412_IISCON_RXCH_PAUSE; switch (mod & S3C2412_IISMOD_MODE_MASK) { case S3C2412_IISMOD_MODE_RXONLY: con &= ~S3C2412_IISCON_IIS_ACTIVE; mod &= ~S3C2412_IISMOD_MODE_MASK; break; case S3C2412_IISMOD_MODE_TXRX: mod &= ~S3C2412_IISMOD_MODE_MASK; mod |= S3C2412_IISMOD_MODE_TXONLY; break; default: dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n", mod & S3C2412_IISMOD_MODE_MASK); } writel(con, regs + S3C2412_IISCON); writel(mod, regs + S3C2412_IISMOD); } fic = readl(regs + S3C2412_IISFIC); pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic); } #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) /* * Wait for the LR signal to allow synchronisation to the L/R clock * from the codec. May only be needed for slave mode. */ static int s3c2412_snd_lrsync(struct s3c_i2sv2_info *i2s) { u32 iiscon; unsigned long loops = msecs_to_loops(5); pr_debug("Entered %s\n", __func__); while (--loops) { iiscon = readl(i2s->regs + S3C2412_IISCON); if (iiscon & S3C2412_IISCON_LRINDEX) break; cpu_relax(); } if (!loops) { printk(KERN_ERR "%s: timeout\n", __func__); return -ETIMEDOUT; } return 0; } /* * Set S3C2412 I2S DAI format */ static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod; pr_debug("Entered %s\n", __func__); iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("hw_params r: IISMOD: %x \n", iismod); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: i2s->master = 0; iismod |= S3C2412_IISMOD_SLAVE; break; case SND_SOC_DAIFMT_CBS_CFS: i2s->master = 1; iismod &= ~S3C2412_IISMOD_SLAVE; break; default: pr_err("unknwon master/slave format\n"); return -EINVAL; } iismod &= ~S3C2412_IISMOD_SDF_MASK; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_RIGHT_J: iismod |= S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_MSB; break; case SND_SOC_DAIFMT_LEFT_J: iismod 
|= S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_LSB; break; case SND_SOC_DAIFMT_I2S: iismod &= ~S3C2412_IISMOD_LR_RLOW; iismod |= S3C2412_IISMOD_SDF_IIS; break; default: pr_err("Unknown data format\n"); return -EINVAL; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("hw_params w: IISMOD: %x \n", iismod); return 0; } static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); struct s3c_dma_params *dma_data; u32 iismod; pr_debug("Entered %s\n", __func__); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_data = i2s->dma_playback; else dma_data = i2s->dma_capture; snd_soc_dai_set_dma_data(dai, substream, dma_data); /* Working copies of register */ iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("%s: r: IISMOD: %x\n", __func__, iismod); iismod &= ~S3C64XX_IISMOD_BLC_MASK; /* Sample size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: iismod |= S3C64XX_IISMOD_BLC_8BIT; break; case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S24_LE: iismod |= S3C64XX_IISMOD_BLC_24BIT; break; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("%s: w: IISMOD: %x\n", __func__, iismod); return 0; } static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod = readl(i2s->regs + S3C2412_IISMOD); pr_debug("Entered %s\n", __func__); pr_debug("%s r: IISMOD: %x\n", __func__, iismod); switch (clk_id) { case S3C_I2SV2_CLKSRC_PCLK: iismod &= ~S3C2412_IISMOD_IMS_SYSMUX; break; case S3C_I2SV2_CLKSRC_AUDIOBUS: iismod |= S3C2412_IISMOD_IMS_SYSMUX; break; case S3C_I2SV2_CLKSRC_CDCLK: /* Error if controller doesn't have the CDCLKCON bit */ if (!(i2s->feature & S3C_FEATURE_CDCLKCON)) return -EINVAL; switch (dir) { case SND_SOC_CLOCK_IN: iismod |= S3C64XX_IISMOD_CDCLKCON; break; case SND_SOC_CLOCK_OUT: iismod &= ~S3C64XX_IISMOD_CDCLKCON; break; 
default: return -EINVAL; } break; default: return -EINVAL; } writel(iismod, i2s->regs + S3C2412_IISMOD); pr_debug("%s w: IISMOD: %x\n", __func__, iismod); return 0; } static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai); int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); unsigned long irqs; int ret = 0; struct s3c_dma_params *dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); pr_debug("Entered %s\n", __func__); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* On start, ensure that the FIFOs are cleared and reset. */ writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH, i2s->regs + S3C2412_IISFIC); /* clear again, just in case */ writel(0x0, i2s->regs + S3C2412_IISFIC); case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (!i2s->master) { ret = s3c2412_snd_lrsync(i2s); if (ret) goto exit_err; } local_irq_save(irqs); if (capture) s3c2412_snd_rxctrl(i2s, 1); else s3c2412_snd_txctrl(i2s, 1); local_irq_restore(irqs); /* * Load the next buffer to DMA to meet the reqirement * of the auto reload mechanism of S3C24XX. * This call won't bother S3C64XX. 
*/ s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: local_irq_save(irqs); if (capture) s3c2412_snd_rxctrl(i2s, 0); else s3c2412_snd_txctrl(i2s, 0); local_irq_restore(irqs); break; default: ret = -EINVAL; break; } exit_err: return ret; } /* * Set S3C2412 Clock dividers */ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 reg; pr_debug("%s(%p, %d, %d)\n", __func__, cpu_dai, div_id, div); switch (div_id) { case S3C_I2SV2_DIV_BCLK: switch (div) { case 16: div = S3C2412_IISMOD_BCLK_16FS; break; case 32: div = S3C2412_IISMOD_BCLK_32FS; break; case 24: div = S3C2412_IISMOD_BCLK_24FS; break; case 48: div = S3C2412_IISMOD_BCLK_48FS; break; default: return -EINVAL; } reg = readl(i2s->regs + S3C2412_IISMOD); reg &= ~S3C2412_IISMOD_BCLK_MASK; writel(reg | div, i2s->regs + S3C2412_IISMOD); pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD)); break; case S3C_I2SV2_DIV_RCLK: switch (div) { case 256: div = S3C2412_IISMOD_RCLK_256FS; break; case 384: div = S3C2412_IISMOD_RCLK_384FS; break; case 512: div = S3C2412_IISMOD_RCLK_512FS; break; case 768: div = S3C2412_IISMOD_RCLK_768FS; break; default: return -EINVAL; } reg = readl(i2s->regs + S3C2412_IISMOD); reg &= ~S3C2412_IISMOD_RCLK_MASK; writel(reg | div, i2s->regs + S3C2412_IISMOD); pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD)); break; case S3C_I2SV2_DIV_PRESCALER: if (div >= 0) { writel((div << 8) | S3C2412_IISPSR_PSREN, i2s->regs + S3C2412_IISPSR); } else { writel(0x0, i2s->regs + S3C2412_IISPSR); } pr_debug("%s: PSR=%08x\n", __func__, readl(i2s->regs + S3C2412_IISPSR)); break; default: return -EINVAL; } return 0; } static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); u32 reg = 
readl(i2s->regs + S3C2412_IISFIC); snd_pcm_sframes_t delay; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) delay = S3C2412_IISFIC_TXCOUNT(reg); else delay = S3C2412_IISFIC_RXCOUNT(reg); return delay; } struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai) { struct s3c_i2sv2_info *i2s = to_info(cpu_dai); u32 iismod = readl(i2s->regs + S3C2412_IISMOD); if (iismod & S3C2412_IISMOD_IMS_SYSMUX) return i2s->iis_cclk; else return i2s->iis_pclk; } EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock); /* default table of all avaialable root fs divisors */ static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 }; int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info, unsigned int *fstab, unsigned int rate, struct clk *clk) { unsigned long clkrate = clk_get_rate(clk); unsigned int div; unsigned int fsclk; unsigned int actual; unsigned int fs; unsigned int fsdiv; signed int deviation = 0; unsigned int best_fs = 0; unsigned int best_div = 0; unsigned int best_rate = 0; unsigned int best_deviation = INT_MAX; pr_debug("Input clock rate %ldHz\n", clkrate); if (fstab == NULL) fstab = iis_fs_tab; for (fs = 0; fs < ARRAY_SIZE(iis_fs_tab); fs++) { fsdiv = iis_fs_tab[fs]; fsclk = clkrate / fsdiv; div = fsclk / rate; if ((fsclk % rate) > (rate / 2)) div++; if (div <= 1) continue; actual = clkrate / (fsdiv * div); deviation = actual - rate; printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n", fsdiv, div, actual, deviation); deviation = abs(deviation); if (deviation < best_deviation) { best_fs = fsdiv; best_div = div; best_rate = actual; best_deviation = deviation; } if (deviation == 0) break; } printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n", best_fs, best_div, best_rate); info->fs_div = best_fs; info->clk_div = best_div; return 0; } EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate); int s3c_i2sv2_probe(struct snd_soc_dai *dai, struct s3c_i2sv2_info *i2s, unsigned long base) { struct device *dev = dai->dev; unsigned int iismod; i2s->dev = dev; /* record our i2s structure for 
later use in the callbacks */ snd_soc_dai_set_drvdata(dai, i2s); i2s->regs = ioremap(base, 0x100); if (i2s->regs == NULL) { dev_err(dev, "cannot ioremap registers\n"); return -ENXIO; } i2s->iis_pclk = clk_get(dev, "iis"); if (IS_ERR(i2s->iis_pclk)) { dev_err(dev, "failed to get iis_clock\n"); iounmap(i2s->regs); return -ENOENT; } clk_enable(i2s->iis_pclk); /* Mark ourselves as in TXRX mode so we can run through our cleanup * process without warnings. */ iismod = readl(i2s->regs + S3C2412_IISMOD); iismod |= S3C2412_IISMOD_MODE_TXRX; writel(iismod, i2s->regs + S3C2412_IISMOD); s3c2412_snd_txctrl(i2s, 0); s3c2412_snd_rxctrl(i2s, 0); return 0; } EXPORT_SYMBOL_GPL(s3c_i2sv2_probe); #ifdef CONFIG_PM static int s3c2412_i2s_suspend(struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); u32 iismod; if (dai->active) { i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD); i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON); i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR); /* some basic suspend checks */ iismod = readl(i2s->regs + S3C2412_IISMOD); if (iismod & S3C2412_IISCON_RXDMA_ACTIVE) pr_warning("%s: RXDMA active?\n", __func__); if (iismod & S3C2412_IISCON_TXDMA_ACTIVE) pr_warning("%s: TXDMA active?\n", __func__); if (iismod & S3C2412_IISCON_IIS_ACTIVE) pr_warning("%s: IIS active\n", __func__); } return 0; } static int s3c2412_i2s_resume(struct snd_soc_dai *dai) { struct s3c_i2sv2_info *i2s = to_info(dai); pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n", dai->active, i2s->suspend_iismod, i2s->suspend_iiscon); if (dai->active) { writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON); writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD); writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR); writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH, i2s->regs + S3C2412_IISFIC); ndelay(250); writel(0x0, i2s->regs + S3C2412_IISFIC); } return 0; } #else #define s3c2412_i2s_suspend NULL #define s3c2412_i2s_resume NULL #endif int 
/*
 * s3c_i2sv2_register_dai - install the common I2Sv2 DAI callbacks into
 * @drv and register the DAI with the ASoC core.
 *
 * hw_params and delay may be pre-set by the caller (e.g. IISv4) and are
 * only filled in when absent; trigger/set_fmt/set_clkdiv/set_sysclk and
 * the suspend/resume hooks are always overwritten.
 *
 * NOTE(review): @id is currently unused -- confirm whether callers rely
 * on it being ignored.
 *
 * Returns the result of snd_soc_register_dai().
 */
s3c_i2sv2_register_dai(struct device *dev, int id,
		       struct snd_soc_dai_driver *drv)
{
	struct snd_soc_dai_ops *ops = drv->ops;

	ops->trigger = s3c2412_i2s_trigger;
	if (!ops->hw_params)
		ops->hw_params = s3c_i2sv2_hw_params;
	ops->set_fmt = s3c2412_i2s_set_fmt;
	ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
	ops->set_sysclk = s3c_i2sv2_set_sysclk;

	/* Allow overriding by (for example) IISv4 */
	if (!ops->delay)
		ops->delay = s3c2412_i2s_delay;

	drv->suspend = s3c2412_i2s_suspend;
	drv->resume = s3c2412_i2s_resume;

	return snd_soc_register_dai(dev, drv);
}
EXPORT_SYMBOL_GPL(s3c_i2sv2_register_dai);

MODULE_LICENSE("GPL");
gpl-2.0
andip71/boeffla-kernel-oos-bacon
drivers/firmware/pcdp.c
6691
3365
/*
 * Build an early-console option string from a PCDP UART descriptor and
 * register it as a preferred console.
 *
 * The string has the form "uart8250,<mmio|io>,0x<addr>[,<baud>[<parity><bits>]]"
 * and is handed to the 8250 early-console setup code.  Returns the result of
 * setup_early_serial8250_console(), or -ENODEV when 8250 console support is
 * not configured in.
 */
static int __init
setup_serial_console(struct pcdp_uart *uart)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
	int mmio;
	/* static: the early-console code keeps a pointer into this buffer */
	static char options[64], *p = options;
	char parity;

	/* The ACPI address-space id tells us MMIO vs. port I/O access. */
	mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
	p += sprintf(p, "uart8250,%s,0x%llx",
		     mmio ? "mmio" : "io", uart->addr.address);
	if (uart->baud) {
		p += sprintf(p, ",%llu", uart->baud);
		if (uart->bits) {
			/* PCDP parity encoding: 2 = even, 3 = odd, else none */
			switch (uart->parity) {
			    case 0x2: parity = 'e'; break;
			    case 0x3: parity = 'o'; break;
			    default:  parity = 'n';
			}
			p += sprintf(p, "%c%d", parity, uart->bits);
		}
	}

	/*
	 * &options[9] skips the leading "uart8250," so the console core only
	 * sees the per-port options part of the string.
	 */
	add_preferred_console("uart", 8250, &options[9]);
	return setup_early_serial8250_console(options);
#else
	return -ENODEV;
#endif
}
VGA console\n"); return 0; #else return -ENODEV; #endif } int __init efi_setup_pcdp_console(char *cmdline) { struct pcdp *pcdp; struct pcdp_uart *uart; struct pcdp_device *dev, *end; int i, serial = 0; int rc = -ENODEV; if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return -ENODEV; pcdp = ioremap(efi.hcdp, 4096); printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); if (strstr(cmdline, "console=hcdp")) { if (pcdp->rev < 3) serial = 1; } else if (strstr(cmdline, "console=")) { printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); goto out; } if (pcdp->rev < 3 && efi_uart_console_only()) serial = 1; for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { if (uart->type == PCDP_CONSOLE_UART) { rc = setup_serial_console(uart); goto out; } } } end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length); for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts); dev < end; dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { if (dev->flags & PCDP_PRIMARY_CONSOLE) { if (dev->type == PCDP_CONSOLE_VGA) { rc = setup_vga_console(dev); goto out; } } } out: iounmap(pcdp); return rc; }
gpl-2.0
linux-shield/kernel
drivers/pcmcia/tcic.c
8227
23942
/*====================================================================== Device driver for Databook TCIC-2 PCMCIA controller tcic.c 1.111 2000/02/15 04:13:12 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/bitops.h> #include <asm/io.h> #include <pcmcia/ss.h> #include "tcic.h" MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); MODULE_LICENSE("Dual MPL/GPL"); /*====================================================================*/ /* Parameters that can be set with 'insmod' */ /* The base port address of the TCIC-2 chip */ static unsigned long tcic_base = TCIC_BASE; /* Specify a socket number to ignore */ static int ignore = -1; /* Probe for safe interrupts? */ static int do_scan = 1; /* Bit map of interrupts to choose from */ static u_int irq_mask = 0xffff; static int irq_list[16]; static unsigned int irq_list_count; /* The card status change interrupt -- 0 means autoselect */ static int cs_irq; /* Poll status interval -- 0 means default to interrupt */ static int poll_interval; /* Delay for card status double-checking */ static int poll_quick = HZ/20; /* CCLK external clock time, in nanoseconds. 
70 ns = 14.31818 MHz */ static int cycle_time = 70; module_param(tcic_base, ulong, 0444); module_param(ignore, int, 0444); module_param(do_scan, int, 0444); module_param(irq_mask, int, 0444); module_param_array(irq_list, int, &irq_list_count, 0444); module_param(cs_irq, int, 0444); module_param(poll_interval, int, 0444); module_param(poll_quick, int, 0444); module_param(cycle_time, int, 0444); /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev); static void tcic_timer(u_long data); static struct pccard_operations tcic_operations; struct tcic_socket { u_short psock; u_char last_sstat; u_char id; struct pcmcia_socket socket; }; static struct timer_list poll_timer; static int tcic_timer_pending; static int sockets; static struct tcic_socket socket_table[2]; /*====================================================================*/ /* Trick when selecting interrupts: the TCIC sktirq pin is supposed to map to irq 11, but is coded as 0 or 1 in the irq registers. */ #define TCIC_IRQ(x) ((x) ? (((x) == 11) ? 
1 : (x)) : 15) #ifdef DEBUG_X static u_char tcic_getb(u_char reg) { u_char val = inb(tcic_base+reg); printk(KERN_DEBUG "tcic_getb(%#lx) = %#x\n", tcic_base+reg, val); return val; } static u_short tcic_getw(u_char reg) { u_short val = inw(tcic_base+reg); printk(KERN_DEBUG "tcic_getw(%#lx) = %#x\n", tcic_base+reg, val); return val; } static void tcic_setb(u_char reg, u_char data) { printk(KERN_DEBUG "tcic_setb(%#lx, %#x)\n", tcic_base+reg, data); outb(data, tcic_base+reg); } static void tcic_setw(u_char reg, u_short data) { printk(KERN_DEBUG "tcic_setw(%#lx, %#x)\n", tcic_base+reg, data); outw(data, tcic_base+reg); } #else #define tcic_getb(reg) inb(tcic_base+reg) #define tcic_getw(reg) inw(tcic_base+reg) #define tcic_setb(reg, data) outb(data, tcic_base+reg) #define tcic_setw(reg, data) outw(data, tcic_base+reg) #endif static void tcic_setl(u_char reg, u_int data) { #ifdef DEBUG_X printk(KERN_DEBUG "tcic_setl(%#x, %#lx)\n", tcic_base+reg, data); #endif outw(data & 0xffff, tcic_base+reg); outw(data >> 16, tcic_base+reg+2); } static void tcic_aux_setb(u_short reg, u_char data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setb(TCIC_AUX, data); } static u_short tcic_aux_getw(u_short reg) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); return tcic_getw(TCIC_AUX); } static void tcic_aux_setw(u_short reg, u_short data) { u_char mode = (tcic_getb(TCIC_MODE) & TCIC_MODE_PGMMASK) | reg; tcic_setb(TCIC_MODE, mode); tcic_setw(TCIC_AUX, data); } /*====================================================================*/ /* Time conversion functions */ static int to_cycles(int ns) { if (ns < 14) return 0; else return 2*(ns-14)/cycle_time; } /*====================================================================*/ static volatile u_int irq_hits; static irqreturn_t __init tcic_irq_count(int irq, void *dev) { irq_hits++; return IRQ_HANDLED; } static u_int __init try_irq(int irq) { u_short 
/*
 * Probe which interrupts in mask0 are safely usable by the TCIC.
 *
 * If do_scan is set, each candidate irq is tested twice with try_irq():
 * the first pass collects irqs that generated exactly one test interrupt,
 * the second pass drops any that fail on re-test.  If scanning found
 * nothing (or was disabled), fall back to simply keeping every irq that
 * is currently unclaimed.  Returns the resulting bitmask; also continues
 * the "irq list (" printk started by the caller (init_tcic).
 */
static u_int __init irq_scan(u_int mask0)
{
	u_int mask1;
	int i;

#ifdef __alpha__
#define PIC 0x4d0
	/* Don't probe level-triggered interrupts -- reserved for PCI */
	int level_mask = inb_p(PIC) | (inb_p(PIC+1) << 8);
	if (level_mask)
		mask0 &= ~level_mask;
#endif

	mask1 = 0;
	if (do_scan) {
		/* first pass: keep irqs that responded exactly once */
		for (i = 0; i < 16; i++)
			if ((mask0 & (1 << i)) && (try_irq(i) == 0))
				mask1 |= (1 << i);
		/* second pass: drop any that fail a repeat test */
		for (i = 0; i < 16; i++)
			if ((mask1 & (1 << i)) && (try_irq(i) != 0)) {
				mask1 ^= (1 << i);
			}
	}

	if (mask1) {
		printk("scanned");
	} else {
		/* Fallback: just find interrupts that aren't in use */
		for (i = 0; i < 16; i++)
			if ((mask0 & (1 << i)) &&
			    (request_irq(i, tcic_irq_count, 0, "x", tcic_irq_count) == 0)) {
				mask1 |= (1 << i);
				free_irq(i, tcic_irq_count);
			}
		printk("default");
	}

	/* finish the caller's "irq list (" message with the chosen irqs */
	printk(") = ");
	for (i = 0; i < 16; i++)
		if (mask1 & (1<<i))
			printk("%s%d", ((mask1 & ((1<<i)-1)) ? "," : ""), i);
	printk(" ");

	return mask1;
}
/*
 * See if socket s holds a card that is present, powered, in I/O mode and
 * already claimed by a (non-PCMCIA) Linux driver; such sockets are skipped
 * by init_tcic().  Returns 1 if the socket appears to be in use, 0 otherwise.
 */
static int __init is_active(int s)
{
	u_short scf1, ioctl, base, num;
	u_char pwr, sstat;
	u_int addr;

	/* read socket config, power and status via the indirect registers */
	tcic_setl(TCIC_ADDR, (s << TCIC_ADDR_SS_SHFT)
		  | TCIC_ADDR_INDREG | TCIC_SCF1(s));
	scf1 = tcic_getw(TCIC_DATA);
	pwr = tcic_getb(TCIC_PWR);
	sstat = tcic_getb(TCIC_SSTAT);
	addr = TCIC_IWIN(s, 0);
	tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X);
	base = tcic_getw(TCIC_DATA);
	tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X);
	ioctl = tcic_getw(TCIC_DATA);

	/*
	 * Decode the I/O window: the base register packs base and size
	 * together (lowest set bit encodes half the window size) unless
	 * the window is flagged TINY (single port).
	 * NOTE(review): decoding assumed from the TCIC IWIN encoding used
	 * by tcic_set_io_map -- confirm against the datasheet.
	 */
	if (ioctl & TCIC_ICTL_TINY)
		num = 1;
	else {
		num = (base ^ (base-1));
		base = base & (base-1);
	}

	/*
	 * Card detected, powered, in I/O mode, window enabled -- and not at
	 * a serial-port address (mask 0xfeef vs 0x02e8 matches 0x2e8/0x2f8/
	 * 0x3e8/0x3f8): probe whether the region is already claimed.
	 */
	if ((sstat & TCIC_SSTAT_CD) && (pwr & TCIC_PWR_VCC(s)) &&
	    (scf1 & TCIC_SCF1_IOSTS) && (ioctl & TCIC_ICTL_ENA) &&
	    ((base & 0xfeef) != 0x02e8)) {
		struct resource *res = request_region(base, num, "tcic-2");
		if (!res) /* region is busy */
			return 1;
		release_region(base, num);
	}

	return 0;
}
/*
 * Read the TCIC chip revision id.
 *
 * Temporarily puts the chip into diagnostic mode (TCIC_TEST_DIAG), extracts
 * the id field from the ILOCK aux register, then restores normal mode.
 */
static int __init get_tcic_id(void)
{
	u_short id;

	tcic_aux_setw(TCIC_AUX_TEST, TCIC_TEST_DIAG);
	id = tcic_aux_getw(TCIC_AUX_ILOCK);
	id = (id & TCIC_ILOCKTEST_ID_MASK) >> TCIC_ILOCKTEST_ID_SH;
	tcic_aux_setw(TCIC_AUX_TEST, 0);	/* leave diagnostic mode */
	return id;
}
0x1000; sockets++; } switch (socket_table[0].id) { case TCIC_ID_DB86082: printk("DB86082"); break; case TCIC_ID_DB86082A: printk("DB86082A"); break; case TCIC_ID_DB86084: printk("DB86084"); break; case TCIC_ID_DB86084A: printk("DB86084A"); break; case TCIC_ID_DB86072: printk("DB86072"); break; case TCIC_ID_DB86184: printk("DB86184"); break; case TCIC_ID_DB86082B: printk("DB86082B"); break; default: printk("Unknown ID 0x%02x", socket_table[0].id); } /* Set up polling */ poll_timer.function = &tcic_timer; poll_timer.data = 0; init_timer(&poll_timer); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); printk(KERN_INFO " irq list ("); if (irq_list_count == 0) mask = irq_mask; else for (i = mask = 0; i < irq_list_count; i++) mask |= (1<<irq_list[i]); /* irq 14, 11, 10, 7, 6, 5, 4, 3 */ mask &= 0x4cf8; /* Scan interrupts */ mask = irq_scan(mask); for (i=0;i<sockets;i++) socket_table[i].socket.irq_mask = mask; /* Check for only two interrupts available */ scan = (mask & (mask-1)); if (((scan & (scan-1)) == 0) && (poll_interval == 0)) poll_interval = HZ; if (poll_interval == 0) { /* Avoid irq 12 unless it is explicitly requested */ u_int cs_mask = mask & ((cs_irq) ? 
(1<<cs_irq) : ~(1<<12)); for (i = 15; i > 0; i--) if ((cs_mask & (1 << i)) && (request_irq(i, tcic_interrupt, 0, "tcic", tcic_interrupt) == 0)) break; cs_irq = i; if (cs_irq == 0) poll_interval = HZ; } if (socket_table[0].socket.irq_mask & (1 << 11)) printk("sktirq is irq 11, "); if (cs_irq != 0) printk("status change on irq %d\n", cs_irq); else printk("polled status, interval = %d ms\n", poll_interval * 1000 / HZ); for (i = 0; i < sockets; i++) { tcic_setw(TCIC_ADDR+2, socket_table[i].psock << TCIC_SS_SHFT); socket_table[i].last_sstat = tcic_getb(TCIC_SSTAT); } /* jump start interrupt handler, if needed */ tcic_interrupt(0, NULL); platform_device_register(&tcic_device); for (i = 0; i < sockets; i++) { socket_table[i].socket.ops = &tcic_operations; socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; socket_table[i].socket.dev.parent = &tcic_device.dev; ret = pcmcia_register_socket(&socket_table[i].socket); if (ret && i) pcmcia_unregister_socket(&socket_table[0].socket); } return ret; return 0; } /* init_tcic */ /*====================================================================*/ static void __exit exit_tcic(void) { int i; del_timer_sync(&poll_timer); if (cs_irq != 0) { tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00); free_irq(cs_irq, tcic_interrupt); } release_region(tcic_base, 16); for (i = 0; i < sockets; i++) { pcmcia_unregister_socket(&socket_table[i].socket); } platform_device_unregister(&tcic_device); platform_driver_unregister(&tcic_driver); } /* exit_tcic */ /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev) { int i, quick = 0; u_char latch, sstat; u_short psock; u_int events; static volatile int active = 0; if (active) { printk(KERN_NOTICE "tcic: reentered interrupt handler!\n"); return IRQ_NONE; } else active = 1; pr_debug("tcic_interrupt()\n"); for (i = 0; i < sockets; i++) { psock = socket_table[i].psock; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) 
| TCIC_ADDR_INDREG | TCIC_SCF1(psock)); sstat = tcic_getb(TCIC_SSTAT); latch = sstat ^ socket_table[psock].last_sstat; socket_table[i].last_sstat = sstat; if (tcic_getb(TCIC_ICSR) & TCIC_ICSR_CDCHG) { tcic_setb(TCIC_ICSR, TCIC_ICSR_CLEAR); quick = 1; } if (latch == 0) continue; events = (latch & TCIC_SSTAT_CD) ? SS_DETECT : 0; events |= (latch & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { events |= (latch & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { events |= (latch & TCIC_SSTAT_RDY) ? SS_READY : 0; events |= (latch & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; events |= (latch & TCIC_SSTAT_LBAT2) ? SS_BATWARN : 0; } if (events) { pcmcia_parse_events(&socket_table[i].socket, events); } } /* Schedule next poll, if needed */ if (((cs_irq == 0) || quick) && (!tcic_timer_pending)) { poll_timer.expires = jiffies + (quick ? poll_quick : poll_interval); add_timer(&poll_timer); tcic_timer_pending = 1; } active = 0; pr_debug("interrupt done\n"); return IRQ_HANDLED; } /* tcic_interrupt */ static void tcic_timer(u_long data) { pr_debug("tcic_timer()\n"); tcic_timer_pending = 0; tcic_interrupt(0, NULL); } /* tcic_timer */ /*====================================================================*/ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; tcic_setl(TCIC_ADDR, (psock << TCIC_ADDR_SS_SHFT) | TCIC_ADDR_INDREG | TCIC_SCF1(psock)); reg = tcic_getb(TCIC_SSTAT); *value = (reg & TCIC_SSTAT_CD) ? SS_DETECT : 0; *value |= (reg & TCIC_SSTAT_WP) ? SS_WRPROT : 0; if (tcic_getw(TCIC_DATA) & TCIC_SCF1_IOSTS) { *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_STSCHG : 0; } else { *value |= (reg & TCIC_SSTAT_RDY) ? SS_READY : 0; *value |= (reg & TCIC_SSTAT_LBAT1) ? SS_BATDEAD : 0; *value |= (reg & TCIC_SSTAT_LBAT2) ? 
SS_BATWARN : 0; } reg = tcic_getb(TCIC_PWR); if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) *value |= SS_POWERON; dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value); return 0; } /* tcic_get_status */ /*====================================================================*/ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_char reg; u_short scf1, scf2; dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, state->Vcc, state->Vpp, state->io_irq, state->csc_mask); tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); reg = tcic_getb(TCIC_PWR); reg &= ~(TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock)); if (state->Vcc == 50) { switch (state->Vpp) { case 0: reg |= TCIC_PWR_VCC(psock) | TCIC_PWR_VPP(psock); break; case 50: reg |= TCIC_PWR_VCC(psock); break; case 120: reg |= TCIC_PWR_VPP(psock); break; default: return -EINVAL; } } else if (state->Vcc != 0) return -EINVAL; if (reg != tcic_getb(TCIC_PWR)) tcic_setb(TCIC_PWR, reg); reg = TCIC_ILOCK_HOLD_CCLK | TCIC_ILOCK_CWAIT; if (state->flags & SS_OUTPUT_ENA) { tcic_setb(TCIC_SCTRL, TCIC_SCTRL_ENA); reg |= TCIC_ILOCK_CRESENA; } else tcic_setb(TCIC_SCTRL, 0); if (state->flags & SS_RESET) reg |= TCIC_ILOCK_CRESET; tcic_aux_setb(TCIC_AUX_ILOCK, reg); tcic_setw(TCIC_ADDR, TCIC_SCF1(psock)); scf1 = TCIC_SCF1_FINPACK; scf1 |= TCIC_IRQ(state->io_irq); if (state->flags & SS_IOCARD) { scf1 |= TCIC_SCF1_IOSTS; if (state->flags & SS_SPKR_ENA) scf1 |= TCIC_SCF1_SPKR; if (state->flags & SS_DMA_MODE) scf1 |= TCIC_SCF1_DREQ2 << TCIC_SCF1_DMA_SHIFT; } tcic_setw(TCIC_DATA, scf1); /* Some general setup stuff, and configure status interrupt */ reg = TCIC_WAIT_ASYNC | TCIC_WAIT_SENSE | to_cycles(250); tcic_aux_setb(TCIC_AUX_WCTL, reg); tcic_aux_setw(TCIC_AUX_SYSCFG, TCIC_SYSCFG_AUTOBUSY|0x0a00| TCIC_IRQ(cs_irq)); /* Card status change interrupt mask */ 
tcic_setw(TCIC_ADDR, TCIC_SCF2(psock)); scf2 = TCIC_SCF2_MALL; if (state->csc_mask & SS_DETECT) scf2 &= ~TCIC_SCF2_MCD; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg &= ~TCIC_SCF2_MLBAT1; } else { if (state->csc_mask & SS_BATDEAD) reg &= ~TCIC_SCF2_MLBAT1; if (state->csc_mask & SS_BATWARN) reg &= ~TCIC_SCF2_MLBAT2; if (state->csc_mask & SS_READY) reg &= ~TCIC_SCF2_MRDY; } tcic_setw(TCIC_DATA, scf2); /* For the ISA bus, the irq should be active-high totem-pole */ tcic_setb(TCIC_IENA, TCIC_IENA_CDCHG | TCIC_IENA_CFG_HIGH); return 0; } /* tcic_set_socket */ /*====================================================================*/ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_int addr; u_short base, len, ioctl; dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_IWIN(psock, io->map); base = io->start; len = io->stop - io->start; /* Check to see that len+1 is power of two, etc */ if ((len & (len+1)) || (base & len)) return -EINVAL; base |= (len+1)>>1; tcic_setw(TCIC_ADDR, addr + TCIC_IBASE_X); tcic_setw(TCIC_DATA, base); ioctl = (psock << TCIC_ICTL_SS_SHFT); ioctl |= (len == 0) ? TCIC_ICTL_TINY : 0; ioctl |= (io->flags & MAP_ACTIVE) ? TCIC_ICTL_ENA : 0; ioctl |= to_cycles(io->speed) & TCIC_ICTL_WSCNT_MASK; if (!(io->flags & MAP_AUTOSZ)) { ioctl |= TCIC_ICTL_QUIET; ioctl |= (io->flags & MAP_16BIT) ? 
TCIC_ICTL_BW_16 : TCIC_ICTL_BW_8; } tcic_setw(TCIC_ADDR, addr + TCIC_ICTL_X); tcic_setw(TCIC_DATA, ioctl); return 0; } /* tcic_set_io_map */ /*====================================================================*/ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) { u_short psock = container_of(sock, struct tcic_socket, socket)->psock; u_short addr, ctl; u_long base, len, mmap; dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, " "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, mem->speed, (unsigned long long)mem->res->start, (unsigned long long)mem->res->end, mem->card_start); if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || (mem->res->start > 0xffffff) || (mem->res->end > 0xffffff) || (mem->res->start > mem->res->end) || (mem->speed > 1000)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); addr = TCIC_MWIN(psock, mem->map); base = mem->res->start; len = mem->res->end - mem->res->start; if ((len & (len+1)) || (base & len)) return -EINVAL; if (len == 0x0fff) base = (base >> TCIC_MBASE_HA_SHFT) | TCIC_MBASE_4K_BIT; else base = (base | (len+1)>>1) >> TCIC_MBASE_HA_SHFT; tcic_setw(TCIC_ADDR, addr + TCIC_MBASE_X); tcic_setw(TCIC_DATA, base); mmap = mem->card_start - mem->res->start; mmap = (mmap >> TCIC_MMAP_CA_SHFT) & TCIC_MMAP_CA_MASK; if (mem->flags & MAP_ATTRIB) mmap |= TCIC_MMAP_REG; tcic_setw(TCIC_ADDR, addr + TCIC_MMAP_X); tcic_setw(TCIC_DATA, mmap); ctl = TCIC_MCTL_QUIET | (psock << TCIC_MCTL_SS_SHFT); ctl |= to_cycles(mem->speed) & TCIC_MCTL_WSCNT_MASK; ctl |= (mem->flags & MAP_16BIT) ? 0 : TCIC_MCTL_B8; ctl |= (mem->flags & MAP_WRPROT) ? TCIC_MCTL_WP : 0; ctl |= (mem->flags & MAP_ACTIVE) ? 
/*
 * Reset a socket to a known state (pccard_operations::init): program every
 * I/O window (2) and memory window (5) with an inactive default mapping.
 */
static int tcic_init(struct pcmcia_socket *s)
{
	int i;
	struct resource res = { .start = 0, .end = 0x1000 };
	/* flags/speed/start/stop zeroed; last field is the map index default */
	pccard_io_map io = { 0, 0, 0, 0, 1 };
	pccard_mem_map mem = { .res = &res, };

	for (i = 0; i < 2; i++) {
		io.map = i;
		tcic_set_io_map(s, &io);
	}
	for (i = 0; i < 5; i++) {
		mem.map = i;
		tcic_set_mem_map(s, &mem);
	}

	return 0;
}
gpl-2.0
NoelMacwan/SXDTianchi
arch/c6x/mm/dma-coherent.c
8739
3232
/* * Port on Texas Instruments TMS320C6x architecture * * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated * Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * DMA uncached mapping support. * * Using code pulled from ARM * Copyright (C) 2000-2004 Russell King * */ #include <linux/slab.h> #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/memblock.h> #include <asm/page.h> /* * DMA coherent memory management, can be redefined using the memdma= * kernel command line */ /* none by default */ static phys_addr_t dma_base; static u32 dma_size; static u32 dma_pages; static unsigned long *dma_bitmap; /* bitmap lock */ static DEFINE_SPINLOCK(dma_lock); /* * Return a DMA coherent and contiguous memory chunk from the DMA memory */ static inline u32 __alloc_dma_pages(int order) { unsigned long flags; u32 pos; spin_lock_irqsave(&dma_lock, flags); pos = bitmap_find_free_region(dma_bitmap, dma_pages, order); spin_unlock_irqrestore(&dma_lock, flags); return dma_base + (pos << PAGE_SHIFT); } static void __free_dma_pages(u32 addr, int order) { unsigned long flags; u32 pos = (addr - dma_base) >> PAGE_SHIFT; if (addr < dma_base || (pos + (1 << order)) >= dma_pages) { printk(KERN_ERR "%s: freeing outside range.\n", __func__); BUG(); } spin_lock_irqsave(&dma_lock, flags); bitmap_release_region(dma_bitmap, pos, order); spin_unlock_irqrestore(&dma_lock, flags); } /* * Allocate DMA coherent memory space and return both the kernel * virtual and DMA address for that space. 
*/ void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { u32 paddr; int order; if (!dma_size || !size) return NULL; order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); paddr = __alloc_dma_pages(order); if (handle) *handle = paddr; if (!paddr) return NULL; return phys_to_virt(paddr); } EXPORT_SYMBOL(dma_alloc_coherent); /* * Free DMA coherent memory as defined by the above mapping. */ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { int order; if (!dma_size || !size) return; order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); __free_dma_pages(virt_to_phys(vaddr), order); } EXPORT_SYMBOL(dma_free_coherent); /* * Initialise the coherent DMA memory allocator using the given uncached region. */ void __init coherent_mem_init(phys_addr_t start, u32 size) { phys_addr_t bitmap_phys; if (!size) return; printk(KERN_INFO "Coherent memory (DMA) region start=0x%x size=0x%x\n", start, size); dma_base = start; dma_size = size; /* allocate bitmap */ dma_pages = dma_size >> PAGE_SHIFT; if (dma_size & (PAGE_SIZE - 1)) ++dma_pages; bitmap_phys = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long), sizeof(long)); dma_bitmap = phys_to_virt(bitmap_phys); memset(dma_bitmap, 0, dma_pages * PAGE_SIZE); }
gpl-2.0
lostemp/lsk-3.4.47
net/sched/sch_plug.c
8739
6835
/* * sch_plug.c Queue traffic until an explicit release command * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * There are two ways to use this qdisc: * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands. * * 2. For network output buffering (a.k.a output commit) functionality. * Output commit property is commonly used by applications using checkpoint * based fault-tolerance to ensure that the checkpoint from which a system * is being restored is consistent w.r.t outside world. * * Consider for e.g. Remus - a Virtual Machine checkpointing system, * wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated * asynchronously to the backup host, while the VM continues executing the * next epoch speculatively. * * The following is a typical sequence of output buffer operations: * 1.At epoch i, start_buffer(i) * 2. At end of epoch i (i.e. after 50ms): * 2.1 Stop VM and take checkpoint(i). * 2.2 start_buffer(i+1) and Resume VM * 3. While speculatively executing epoch(i+1), asynchronously replicate * checkpoint(i) to backup host. * 4. When checkpoint_ack(i) is received from backup, release_buffer(i) * Thus, this Qdisc would receive the following sequence of commands: * TCQ_PLUG_BUFFER (epoch i) * .. TCQ_PLUG_BUFFER (epoch i+1) * ....TCQ_PLUG_RELEASE_ONE (epoch i) * ......TCQ_PLUG_BUFFER (epoch i+2) * ........ 
/*
 * Enqueue a packet (Qdisc_ops::enqueue).
 *
 * Accepts the skb while the byte backlog stays within q->limit, counting it
 * against the current epoch unless the qdisc is in indefinite pass-through
 * mode; over-limit packets are dropped via qdisc_reshape_fail().
 */
static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct plug_sched_data *q = qdisc_priv(sch);

	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
		if (!q->unplug_indefinite)
			q->pkts_current_epoch++;
		return qdisc_enqueue_tail(skb, sch);
	}

	return qdisc_reshape_fail(skb, sch);
}
*/ qdisc_throttled(sch); return NULL; } q->pkts_to_release--; } return qdisc_dequeue_head(sch); } static int plug_init(struct Qdisc *sch, struct nlattr *opt) { struct plug_sched_data *q = qdisc_priv(sch); q->pkts_current_epoch = 0; q->pkts_last_epoch = 0; q->pkts_to_release = 0; q->unplug_indefinite = false; if (opt == NULL) { /* We will set a default limit of 100 pkts (~150kB) * in case tx_queue_len is not available. The * default value is completely arbitrary. */ u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100; q->limit = pkt_limit * psched_mtu(qdisc_dev(sch)); } else { struct tc_plug_qopt *ctl = nla_data(opt); if (nla_len(opt) < sizeof(*ctl)) return -EINVAL; q->limit = ctl->limit; } qdisc_throttled(sch); return 0; } /* Receives 4 types of messages: * TCQ_PLUG_BUFFER: Inset a plug into the queue and * buffer any incoming packets * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head * to beginning of the next plug. * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. * Stop buffering packets until the next TCQ_PLUG_BUFFER * command is received (just act as a pass-thru queue). * TCQ_PLUG_LIMIT: Increase/decrease queue size */ static int plug_change(struct Qdisc *sch, struct nlattr *opt) { struct plug_sched_data *q = qdisc_priv(sch); struct tc_plug_qopt *msg; if (opt == NULL) return -EINVAL; msg = nla_data(opt); if (nla_len(opt) < sizeof(*msg)) return -EINVAL; switch (msg->action) { case TCQ_PLUG_BUFFER: /* Save size of the current buffer */ q->pkts_last_epoch = q->pkts_current_epoch; q->pkts_current_epoch = 0; if (q->unplug_indefinite) qdisc_throttled(sch); q->unplug_indefinite = false; break; case TCQ_PLUG_RELEASE_ONE: /* Add packets from the last complete buffer to the * packets to be released set. 
*/ q->pkts_to_release += q->pkts_last_epoch; q->pkts_last_epoch = 0; qdisc_unthrottled(sch); netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_RELEASE_INDEFINITE: q->unplug_indefinite = true; q->pkts_to_release = 0; q->pkts_last_epoch = 0; q->pkts_current_epoch = 0; qdisc_unthrottled(sch); netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_LIMIT: /* Limit is supplied in bytes */ q->limit = msg->limit; break; default: return -EINVAL; } return 0; } static struct Qdisc_ops plug_qdisc_ops __read_mostly = { .id = "plug", .priv_size = sizeof(struct plug_sched_data), .enqueue = plug_enqueue, .dequeue = plug_dequeue, .peek = qdisc_peek_head, .init = plug_init, .change = plug_change, .owner = THIS_MODULE, }; static int __init plug_module_init(void) { return register_qdisc(&plug_qdisc_ops); } static void __exit plug_module_exit(void) { unregister_qdisc(&plug_qdisc_ops); } module_init(plug_module_init) module_exit(plug_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
LTEAdvanced/DasKernelBoots-LU6200-STOCKJB-
arch/sh/kernel/cpu/sh3/probe.c
10531
3037
/*
 * arch/sh/kernel/cpu/sh3/probe.c
 *
 * CPU Subtype Probing for SH-3.
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>

/*
 * Detect the SH-3 CPU subtype by probing the operand cache geometry
 * and fill in boot_cpu_data accordingly.
 *
 * The probe writes to the cache address array through two addresses
 * that alias onto the same entry on a 128-entry (shadowed) cache but
 * hit distinct entries on a 256-entry cache, then reads back to see
 * whether the writes interfered. The exact write-back/flip/read-back
 * ordering below is what makes the detection work; do not reorder.
 */
void __cpuinit cpu_probe(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_uncached();
	/*
	 * Check if the entry shadows or not.
	 * When shadowed, it's 128-entry system.
	 * Otherwise, it's 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0 = __raw_readl(addr0);
	__raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
	data1 = __raw_readl(addr1);
	__raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not */
	data0 = __raw_readl(addr0);
	data0 ^= SH_CACHE_VALID;
	__raw_writel(data0, addr0);
	data1 = __raw_readl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	__raw_writel(data2, addr1);
	data3 = __raw_readl(addr0);

	/* Lastly, invalidate them. */
	__raw_writel(data0&~SH_CACHE_VALID, addr0);
	__raw_writel(data2&~SH_CACHE_VALID, addr1);

	back_to_cached();

	boot_cpu_data.dcache.ways		= 4;
	boot_cpu_data.dcache.entry_shift	= 4;
	boot_cpu_data.dcache.linesz		= L1_CACHE_BYTES;
	boot_cpu_data.dcache.flags		= 0;

	/*
	 * 7709A/7729 has 16K cache (256-entry), while 7702 has only
	 * 2K(direct) 7702 is not supported (yet)
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		boot_cpu_data.dcache.way_incr	= (1 << 11);
		boot_cpu_data.dcache.entry_mask	= 0x7f0;
		boot_cpu_data.dcache.sets	= 128;
		boot_cpu_data.type = CPU_SH7708;

		boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {				/* 7709A or 7729 */
		boot_cpu_data.dcache.way_incr	= (1 << 12);
		boot_cpu_data.dcache.entry_mask	= 0xff0;
		boot_cpu_data.dcache.sets	= 256;
		boot_cpu_data.type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7706)
		boot_cpu_data.type = CPU_SH7706;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
		boot_cpu_data.type = CPU_SH7710;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7712)
		boot_cpu_data.type = CPU_SH7712;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
		boot_cpu_data.type = CPU_SH7720;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7721)
		boot_cpu_data.type = CPU_SH7721;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		boot_cpu_data.type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		boot_cpu_data.dcache.way_incr	= (1 << 13);
		boot_cpu_data.dcache.entry_mask	= 0x1ff0;
		boot_cpu_data.dcache.sets	= 512;
		__raw_writel(CCR_CACHE_32KB, CCR3_REG);
#else
		__raw_writel(CCR_CACHE_16KB, CCR3_REG);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
	boot_cpu_data.icache = boot_cpu_data.dcache;

	boot_cpu_data.family = CPU_FAMILY_SH3;
}
gpl-2.0
calixtolinux/Android-ics-kernel
arch/arm/mach-omap1/board-palmtt.c
36
7166
/*
 * linux/arch/arm/mach-omap1/board-palmtt.c
 *
 * Modified from board-palmtt2.c
 *
 * Modified and amended for Palm Tungsten|T
 * by Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
#include <linux/clk.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/leds.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <plat/led.h>
#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/irda.h>
#include <plat/keypad.h>

#include "common.h"

#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>

/* Palm Tungsten|T GPIO assignments */
#define PALMTT_USBDETECT_GPIO	0
#define PALMTT_CABLE_GPIO	1
#define PALMTT_LED_GPIO		3
#define PALMTT_PENIRQ_GPIO	6
#define PALMTT_MMC_WP_GPIO	8
#define PALMTT_HDQ_GPIO		11

static const unsigned int palmtt_keymap[] = {
	KEY(0, 0, KEY_ESC),
	KEY(1, 0, KEY_SPACE),
	KEY(2, 0, KEY_LEFTCTRL),
	KEY(3, 0, KEY_TAB),
	KEY(4, 0, KEY_ENTER),
	KEY(0, 1, KEY_LEFT),
	KEY(1, 1, KEY_DOWN),
	KEY(2, 1, KEY_UP),
	KEY(3, 1, KEY_RIGHT),
	KEY(0, 2, KEY_SLEEP),
	KEY(4, 2, KEY_Y),
};

/* On-board NOR flash layout (PalmOS loader/FS preserved read-only) */
static struct mtd_partition palmtt_partitions[] = {
	{
		.name		= "write8k",
		.offset		= 0,
		.size		= SZ_8K,
		.mask_flags	= 0,
	},
	{
		.name		= "PalmOS-BootLoader(ro)",
		.offset		= SZ_8K,
		.size		= 7 * SZ_8K,
		.mask_flags	= MTD_WRITEABLE,
	},
	{
		.name		= "u-boot",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 8 * SZ_8K,
		.mask_flags	= 0,
	},
	{
		.name		= "PalmOS-FS(ro)",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 7 * SZ_1M + 4 * SZ_64K - 16 * SZ_8K,
		.mask_flags	= MTD_WRITEABLE,
	},
	{
		.name		= "u-boot(rez)",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_128K,
		.mask_flags	= 0
	},
	{
		.name		= "empty",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0
	}
};

static struct physmap_flash_data palmtt_flash_data = {
	.width		= 2,
	.set_vpp	= omap1_set_vpp,
	.parts		= palmtt_partitions,
	.nr_parts	= ARRAY_SIZE(palmtt_partitions),
};

static struct resource palmtt_flash_resource = {
	.start		= OMAP_CS0_PHYS,
	.end		= OMAP_CS0_PHYS + SZ_8M - 1,
	.flags		= IORESOURCE_MEM,
};

static struct platform_device palmtt_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &palmtt_flash_data,
	},
	.num_resources	= 1,
	.resource	= &palmtt_flash_resource,
};

static struct resource palmtt_kp_resources[] = {
	[0]	= {
		.start	= INT_KEYBOARD,
		.end	= INT_KEYBOARD,
		.flags	= IORESOURCE_IRQ,
	},
};

static const struct matrix_keymap_data palmtt_keymap_data = {
	.keymap		= palmtt_keymap,
	.keymap_size	= ARRAY_SIZE(palmtt_keymap),
};

static struct omap_kp_platform_data palmtt_kp_data = {
	.rows		= 6,
	.cols		= 3,
	.keymap_data	= &palmtt_keymap_data,
};

static struct platform_device palmtt_kp_device = {
	.name		= "omap-keypad",
	.id		= -1,
	.dev		= {
		.platform_data	= &palmtt_kp_data,
	},
	.num_resources	= ARRAY_SIZE(palmtt_kp_resources),
	.resource	= palmtt_kp_resources,
};

static struct platform_device palmtt_lcd_device = {
	.name		= "lcd_palmtt",
	.id		= -1,
};

static struct omap_irda_config palmtt_irda_config = {
	.transceiver_cap	= IR_SIRMODE,
	.rx_channel		= OMAP_DMA_UART3_RX,
	.tx_channel		= OMAP_DMA_UART3_TX,
	.dest_start		= UART3_THR,
	.src_start		= UART3_RHR,
	.tx_trigger		= 0,
	.rx_trigger		= 0,
};

static struct resource palmtt_irda_resources[] = {
	[0]	= {
		.start	= INT_UART3,
		.end	= INT_UART3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device palmtt_irda_device = {
	.name		= "omapirda",
	.id		= -1,
	.dev		= {
		.platform_data	= &palmtt_irda_config,
	},
	.num_resources	= ARRAY_SIZE(palmtt_irda_resources),
	.resource	= palmtt_irda_resources,
};

static struct platform_device palmtt_spi_device = {
	.name		= "spi_palmtt",
	.id		= -1,
};

static struct omap_backlight_config palmtt_backlight_config = {
	.default_intensity	= 0xa0,
};

static struct platform_device palmtt_backlight_device = {
	.name		= "omap-bl",
	.id		= -1,
	.dev		= {
		.platform_data= &palmtt_backlight_config,
	},
};

static struct omap_led_config palmtt_led_config[] = {
	{
		.cdev	= {
			.name	= "palmtt:led0",
		},
		.gpio	= PALMTT_LED_GPIO,
	},
};

static struct omap_led_platform_data palmtt_led_data = {
	.nr_leds	= ARRAY_SIZE(palmtt_led_config),
	.leds		= palmtt_led_config,
};

static struct platform_device palmtt_led_device = {
	.name	= "omap-led",
	.id	= -1,
	.dev	= {
		.platform_data	= &palmtt_led_data,
	},
};

static struct platform_device *palmtt_devices[] __initdata = {
	&palmtt_flash_device,
	&palmtt_kp_device,
	&palmtt_lcd_device,
	&palmtt_irda_device,
	&palmtt_spi_device,
	&palmtt_backlight_device,
	&palmtt_led_device,
};

/* Pen-down when the touchscreen IRQ line is pulled low */
static int palmtt_get_pendown_state(void)
{
	return !gpio_get_value(PALMTT_PENIRQ_GPIO);
}

static const struct ads7846_platform_data palmtt_ts_info = {
	.model			= 7846,
	.vref_delay_usecs	= 100,	/* internal, no capacitor */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.get_pendown_state	= palmtt_get_pendown_state,
};

static struct spi_board_info __initdata palmtt_boardinfo[] = {
	{
		/* MicroWire (bus 2) CS0 has an ads7846e */
		.modalias	= "ads7846",
		.platform_data	= &palmtt_ts_info,
		.irq		= OMAP_GPIO_IRQ(PALMTT_PENIRQ_GPIO),
		.max_speed_hz	= 120000 /* max sample rate at 3V */
					* 26 /* command + data + overhead */,
		.bus_num	= 2,
		.chip_select	= 0,
	}
};

static struct omap_usb_config palmtt_usb_config __initdata = {
	.register_dev	= 1,
	.hmc_mode	= 0,
	.pins[0]	= 2,
};

static struct omap_lcd_config palmtt_lcd_config __initdata = {
	.ctrl_name	= "internal",
};

static struct omap_board_config_kernel palmtt_config[] __initdata = {
	{ OMAP_TAG_LCD,		&palmtt_lcd_config	},
};

/*
 * Enable (mode != 0) or disable the MPU watchdog timer.
 * NOTE(review): the 0x00f5/0x00a0 write pair looks like the OMAP1
 * watchdog-disable magic sequence — confirm against the OMAP1510 TRM.
 */
static void __init omap_mpu_wdt_mode(int mode)
{
	if (mode)
		omap_writew(0x8000, OMAP_WDT_TIMER_MODE);
	else {
		omap_writew(0x00f5, OMAP_WDT_TIMER_MODE);
		omap_writew(0x00a0, OMAP_WDT_TIMER_MODE);
	}
}

/* Board init: mux UART pins, disable the watchdog, register devices */
static void __init omap_palmtt_init(void)
{
	/* mux pins for uarts */
	omap_cfg_reg(UART1_TX);
	omap_cfg_reg(UART1_RTS);
	omap_cfg_reg(UART2_TX);
	omap_cfg_reg(UART2_RTS);
	omap_cfg_reg(UART3_TX);
	omap_cfg_reg(UART3_RX);

	omap_mpu_wdt_mode(0);

	omap_board_config = palmtt_config;
	omap_board_config_size = ARRAY_SIZE(palmtt_config);

	platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices));

	spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo));
	omap_serial_init();
	omap1_usb_init(&palmtt_usb_config);
	omap_register_i2c_bus(1, 100, NULL, 0);
}

MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
	.atag_offset	= 0x100,
	.map_io		= omap15xx_map_io,
	.init_early     = omap1_init_early,
	.reserve	= omap_reserve,
	.init_irq	= omap1_init_irq,
	.init_machine	= omap_palmtt_init,
	.timer		= &omap1_timer,
MACHINE_END
gpl-2.0
navigator117/chromiumos-third_party-kernel
net/wireless/nl80211.c
36
115334
/* * This is the new netlink-based wireless configuration interface. * * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/if.h> #include <linux/module.h> #include <linux/err.h> #include <linux/list.h> #include <linux/if_ether.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <linux/netlink.h> #include <linux/etherdevice.h> #include <net/net_namespace.h> #include <net/genetlink.h> #include <net/cfg80211.h> #include <net/sock.h> #include "core.h" #include "nl80211.h" #include "reg.h" /* the netlink family */ static struct genl_family nl80211_fam = { .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ .name = "nl80211", /* have users key off the name instead */ .hdrsize = 0, /* no private header */ .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .netnsok = true, }; /* internal helper: get rdev and dev */ static int get_rdev_dev_by_info_ifindex(struct genl_info *info, struct cfg80211_registered_device **rdev, struct net_device **dev) { struct nlattr **attrs = info->attrs; int ifindex; if (!attrs[NL80211_ATTR_IFINDEX]) return -EINVAL; ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); *dev = dev_get_by_index(genl_info_net(info), ifindex); if (!*dev) return -ENODEV; *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex); if (IS_ERR(*rdev)) { dev_put(*dev); return PTR_ERR(*rdev); } return 0; } /* policy for the attributes */ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, 
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN }, [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN }, [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_STA_AID] = { .type = NLA_U16 }, [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 }, [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_MESH_ID_LEN }, [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_MESH_PARAMS] = { .type = NLA_NESTED }, [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, .len = NL80211_HT_CAPABILITY_LEN 
}, [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, [NL80211_ATTR_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, [NL80211_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG }, [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 }, [NL80211_ATTR_STA_FLAGS2] = { .len = sizeof(struct nl80211_sta_flag_update), }, [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, }; /* policy for the attributes */ static struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, }; /* IE validation */ static bool is_valid_ie_attr(const struct nlattr *attr) { const u8 *pos; int len; if (!attr) return true; pos = nla_data(attr); len = nla_len(attr); while (len) { u8 elemlen; if (len < 2) return false; len -= 2; elemlen = pos[1]; if (elemlen > len) return false; len -= elemlen; pos += 2 + elemlen; } return true; } /* message building helper */ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); } static int nl80211_msg_put_channel(struct sk_buff *msg, struct ieee80211_channel *chan) { 
NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, chan->center_freq); if (chan->flags & IEEE80211_CHAN_DISABLED) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); if (chan->flags & IEEE80211_CHAN_NO_IBSS) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); if (chan->flags & IEEE80211_CHAN_RADAR) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power)); return 0; nla_put_failure: return -ENOBUFS; } /* netlink command implementations */ struct key_parse { struct key_params p; int idx; bool def, defmgmt; }; static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, nl80211_key_policy); if (err) return err; k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; if (tb[NL80211_KEY_IDX]) k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); if (tb[NL80211_KEY_DATA]) { k->p.key = nla_data(tb[NL80211_KEY_DATA]); k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); } if (tb[NL80211_KEY_SEQ]) { k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); } if (tb[NL80211_KEY_CIPHER]) k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); return 0; } static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) { if (info->attrs[NL80211_ATTR_KEY_DATA]) { k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); } if (info->attrs[NL80211_ATTR_KEY_SEQ]) { k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); } if (info->attrs[NL80211_ATTR_KEY_IDX]) k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (info->attrs[NL80211_ATTR_KEY_CIPHER]) k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); k->def = 
!!info->attrs[NL80211_ATTR_KEY_DEFAULT]; k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; return 0; } static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) { int err; memset(k, 0, sizeof(*k)); k->idx = -1; if (info->attrs[NL80211_ATTR_KEY]) err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k); else err = nl80211_parse_key_old(info, k); if (err) return err; if (k->def && k->defmgmt) return -EINVAL; if (k->idx != -1) { if (k->defmgmt) { if (k->idx < 4 || k->idx > 5) return -EINVAL; } else if (k->def) { if (k->idx < 0 || k->idx > 3) return -EINVAL; } else { if (k->idx < 0 || k->idx > 5) return -EINVAL; } } return 0; } static struct cfg80211_cached_keys * nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, struct nlattr *keys) { struct key_parse parse; struct nlattr *key; struct cfg80211_cached_keys *result; int rem, err, def = 0; result = kzalloc(sizeof(*result), GFP_KERNEL); if (!result) return ERR_PTR(-ENOMEM); result->def = -1; result->defmgmt = -1; nla_for_each_nested(key, keys, rem) { memset(&parse, 0, sizeof(parse)); parse.idx = -1; err = nl80211_parse_key_new(key, &parse); if (err) goto error; err = -EINVAL; if (!parse.p.key) goto error; if (parse.idx < 0 || parse.idx > 4) goto error; if (parse.def) { if (def) goto error; def = 1; result->def = parse.idx; } else if (parse.defmgmt) goto error; err = cfg80211_validate_key_settings(rdev, &parse.p, parse.idx, NULL); if (err) goto error; result->params[parse.idx].cipher = parse.p.cipher; result->params[parse.idx].key_len = parse.p.key_len; result->params[parse.idx].key = result->data[parse.idx]; memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len); } return result; error: kfree(result); return ERR_PTR(err); } static int nl80211_key_allowed(struct wireless_dev *wdev) { ASSERT_WDEV_LOCK(wdev); if (!netif_running(wdev->netdev)) return -ENETDOWN; switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_ADHOC: if 
(!wdev->current_bss) return -ENOLINK; break; case NL80211_IFTYPE_STATION: if (wdev->sme_state != CFG80211_SME_CONNECTED) return -ENOLINK; break; default: return -EINVAL; } return 0; } static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct cfg80211_registered_device *dev) { void *hdr; struct nlattr *nl_bands, *nl_band; struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_rates, *nl_rate; struct nlattr *nl_modes; struct nlattr *nl_cmds; enum ieee80211_band band; struct ieee80211_channel *chan; struct ieee80211_rate *rate; int i; u16 ifmodes = dev->wiphy.interface_modes; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, cfg80211_rdev_list_generation); NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, dev->wiphy.retry_short); NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, dev->wiphy.retry_long); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, dev->wiphy.frag_threshold); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, dev->wiphy.rts_threshold); NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, dev->wiphy.max_scan_ssids); NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, dev->wiphy.max_scan_ie_len); NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * dev->wiphy.n_cipher_suites, dev->wiphy.cipher_suites); nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); if (!nl_modes) goto nla_put_failure; i = 0; while (ifmodes) { if (ifmodes & 1) NLA_PUT_FLAG(msg, i); ifmodes >>= 1; i++; } nla_nest_end(msg, nl_modes); nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); if (!nl_bands) goto nla_put_failure; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!dev->wiphy.bands[band]) continue; nl_band = nla_nest_start(msg, band); if (!nl_band) goto nla_put_failure; /* add HT info */ if 
(dev->wiphy.bands[band]->ht_cap.ht_supported) { NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, sizeof(dev->wiphy.bands[band]->ht_cap.mcs), &dev->wiphy.bands[band]->ht_cap.mcs); NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, dev->wiphy.bands[band]->ht_cap.cap); NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, dev->wiphy.bands[band]->ht_cap.ampdu_factor); NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, dev->wiphy.bands[band]->ht_cap.ampdu_density); } /* add frequencies */ nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); if (!nl_freqs) goto nla_put_failure; for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { nl_freq = nla_nest_start(msg, i); if (!nl_freq) goto nla_put_failure; chan = &dev->wiphy.bands[band]->channels[i]; if (nl80211_msg_put_channel(msg, chan)) goto nla_put_failure; nla_nest_end(msg, nl_freq); } nla_nest_end(msg, nl_freqs); /* add bitrates */ nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) goto nla_put_failure; for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { nl_rate = nla_nest_start(msg, i); if (!nl_rate) goto nla_put_failure; rate = &dev->wiphy.bands[band]->bitrates[i]; NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, rate->bitrate); if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) NLA_PUT_FLAG(msg, NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); nla_nest_end(msg, nl_rate); } nla_nest_end(msg, nl_rates); nla_nest_end(msg, nl_band); } nla_nest_end(msg, nl_bands); nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); if (!nl_cmds) goto nla_put_failure; i = 0; #define CMD(op, n) \ do { \ if (dev->ops->op) { \ i++; \ NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ } \ } while (0) CMD(add_virtual_intf, NEW_INTERFACE); CMD(change_virtual_intf, SET_INTERFACE); CMD(add_key, NEW_KEY); CMD(add_beacon, NEW_BEACON); CMD(add_station, NEW_STATION); CMD(add_mpath, NEW_MPATH); CMD(set_mesh_params, SET_MESH_PARAMS); CMD(change_bss, SET_BSS); CMD(auth, AUTHENTICATE); CMD(assoc, ASSOCIATE); CMD(deauth, DEAUTHENTICATE); 
CMD(disassoc, DISASSOCIATE); CMD(join_ibss, JOIN_IBSS); if (dev->wiphy.netnsok) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); } #undef CMD if (dev->ops->connect || dev->ops->auth) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); } if (dev->ops->disconnect || dev->ops->deauth) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); } nla_nest_end(msg, nl_cmds); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) { int idx = 0; int start = cb->args[0]; struct cfg80211_registered_device *dev; mutex_lock(&cfg80211_mutex); list_for_each_entry(dev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) continue; if (++idx <= start) continue; if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) { idx--; break; } } mutex_unlock(&cfg80211_mutex); cb->args[0] = idx; return skb->len; } static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *dev; dev = cfg80211_get_dev_from_info(info); if (IS_ERR(dev)) return PTR_ERR(dev); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out_err; if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) goto out_free; cfg80211_unlock_rdev(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_err: cfg80211_unlock_rdev(dev); return -ENOBUFS; } static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = { [NL80211_TXQ_ATTR_QUEUE] = { .type = NLA_U8 }, [NL80211_TXQ_ATTR_TXOP] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMIN] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_CWMAX] = { .type = NLA_U16 }, [NL80211_TXQ_ATTR_AIFS] = { .type = NLA_U8 }, }; static int parse_txq_params(struct nlattr *tb[], struct ieee80211_txq_params *txq_params) { if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || 
!tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || !tb[NL80211_TXQ_ATTR_AIFS]) return -EINVAL; txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); return 0; } static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int result = 0, rem_txq_params = 0; struct nlattr *nl_txq_params; u32 changed; u8 retry_short = 0, retry_long = 0; u32 frag_threshold = 0, rts_threshold = 0; rtnl_lock(); mutex_lock(&cfg80211_mutex); rdev = __cfg80211_rdev_from_info(info); if (IS_ERR(rdev)) { mutex_unlock(&cfg80211_mutex); result = PTR_ERR(rdev); goto unlock; } mutex_lock(&rdev->mtx); if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); mutex_unlock(&cfg80211_mutex); if (result) goto bad_res; if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; if (!rdev->ops->set_txq_params) { result = -EOPNOTSUPP; goto bad_res; } nla_for_each_nested(nl_txq_params, info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], rem_txq_params) { nla_parse(tb, NL80211_TXQ_ATTR_MAX, nla_data(nl_txq_params), nla_len(nl_txq_params), txq_params_policy); result = parse_txq_params(tb, &txq_params); if (result) goto bad_res; result = rdev->ops->set_txq_params(&rdev->wiphy, &txq_params); if (result) goto bad_res; } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; u32 freq; result = -EINVAL; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { channel_type = nla_get_u32(info->attrs[ NL80211_ATTR_WIPHY_CHANNEL_TYPE]); if (channel_type != NL80211_CHAN_NO_HT && channel_type != NL80211_CHAN_HT20 && channel_type != 
NL80211_CHAN_HT40PLUS && channel_type != NL80211_CHAN_HT40MINUS) goto bad_res; } freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); mutex_lock(&rdev->devlist_mtx); result = rdev_set_freq(rdev, NULL, freq, channel_type); mutex_unlock(&rdev->devlist_mtx); if (result) goto bad_res; } changed = 0; if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { retry_short = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); if (retry_short == 0) { result = -EINVAL; goto bad_res; } changed |= WIPHY_PARAM_RETRY_SHORT; } if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { retry_long = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); if (retry_long == 0) { result = -EINVAL; goto bad_res; } changed |= WIPHY_PARAM_RETRY_LONG; } if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { frag_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); if (frag_threshold < 256) { result = -EINVAL; goto bad_res; } if (frag_threshold != (u32) -1) { /* * Fragments (apart from the last one) are required to * have even length. Make the fragmentation code * simpler by stripping LSB should someone try to use * odd threshold value. 
*/ frag_threshold &= ~0x1; } changed |= WIPHY_PARAM_FRAG_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { rts_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); changed |= WIPHY_PARAM_RTS_THRESHOLD; } if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; if (!rdev->ops->set_wiphy_params) { result = -EOPNOTSUPP; goto bad_res; } old_retry_short = rdev->wiphy.retry_short; old_retry_long = rdev->wiphy.retry_long; old_frag_threshold = rdev->wiphy.frag_threshold; old_rts_threshold = rdev->wiphy.rts_threshold; if (changed & WIPHY_PARAM_RETRY_SHORT) rdev->wiphy.retry_short = retry_short; if (changed & WIPHY_PARAM_RETRY_LONG) rdev->wiphy.retry_long = retry_long; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) rdev->wiphy.frag_threshold = frag_threshold; if (changed & WIPHY_PARAM_RTS_THRESHOLD) rdev->wiphy.rts_threshold = rts_threshold; result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); if (result) { rdev->wiphy.retry_short = old_retry_short; rdev->wiphy.retry_long = old_retry_long; rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; } } bad_res: mutex_unlock(&rdev->mtx); unlock: rtnl_unlock(); return result; } static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct net_device *dev) { void *hdr; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ (cfg80211_rdev_list_generation << 2)); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_interface(struct sk_buff *skb, struct 
netlink_callback *cb) { int wp_idx = 0; int if_idx = 0; int wp_start = cb->args[0]; int if_start = cb->args[1]; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; if (wp_idx < wp_start) { wp_idx++; continue; } if_idx = 0; mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) { if (if_idx < if_start) { if_idx++; continue; } if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev->netdev) < 0) { mutex_unlock(&rdev->devlist_mtx); goto out; } if_idx++; } mutex_unlock(&rdev->devlist_mtx); wp_idx++; } out: mutex_unlock(&cfg80211_mutex); cb->args[0] = wp_idx; cb->args[1] = if_idx; return skb->len; } static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *dev; struct net_device *netdev; int err; err = get_rdev_dev_by_info_ifindex(info, &dev, &netdev); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out_err; if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, dev, netdev) < 0) goto out_free; dev_put(netdev); cfg80211_unlock_rdev(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_err: dev_put(netdev); cfg80211_unlock_rdev(dev); return -ENOBUFS; } static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = { [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG }, }; static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) { struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1]; int flag; *mntrflags = 0; if (!nla) return -EINVAL; if (nla_parse_nested(flags, 
NL80211_MNTR_FLAG_MAX, nla, mntr_flags_policy)) return -EINVAL; for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++) if (flags[flag]) *mntrflags |= (1<<flag); return 0; } static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct vif_params params; int err; enum nl80211_iftype otype, ntype; struct net_device *dev; u32 _flags, *flags = NULL; bool change = false; memset(&params, 0, sizeof(params)); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; otype = ntype = dev->ieee80211_ptr->iftype; if (info->attrs[NL80211_ATTR_IFTYPE]) { ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (otype != ntype) change = true; if (ntype > NL80211_IFTYPE_MAX) { err = -EINVAL; goto unlock; } } if (info->attrs[NL80211_ATTR_MESH_ID]) { if (ntype != NL80211_IFTYPE_MESH_POINT) { err = -EINVAL; goto unlock; } params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); change = true; } if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { if (ntype != NL80211_IFTYPE_MONITOR) { err = -EINVAL; goto unlock; } err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], &_flags); if (err) goto unlock; flags = &_flags; change = true; } if (change) err = cfg80211_change_iface(rdev, dev, ntype, flags, &params); else err = 0; unlock: dev_put(dev); cfg80211_unlock_rdev(rdev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct vif_params params; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; u32 flags; memset(&params, 0, sizeof(params)); if (!info->attrs[NL80211_ATTR_IFNAME]) return -EINVAL; if (info->attrs[NL80211_ATTR_IFTYPE]) { type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (type > NL80211_IFTYPE_MAX) return -EINVAL; } rtnl_lock(); rdev = cfg80211_get_dev_from_info(info); if 
(IS_ERR(rdev)) { err = PTR_ERR(rdev); goto unlock_rtnl; } if (!rdev->ops->add_virtual_intf || !(rdev->wiphy.interface_modes & (1 << type))) { err = -EOPNOTSUPP; goto unlock; } if (type == NL80211_IFTYPE_MESH_POINT && info->attrs[NL80211_ATTR_MESH_ID]) { params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); } err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); err = rdev->ops->add_virtual_intf(&rdev->wiphy, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, &params); unlock: cfg80211_unlock_rdev(rdev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (!rdev->ops->del_virtual_intf) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->del_virtual_intf(&rdev->wiphy, dev); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } struct get_key_cookie { struct sk_buff *msg; int error; int idx; }; static void get_key_callback(void *c, struct key_params *params) { struct nlattr *key; struct get_key_cookie *cookie = c; if (params->key) NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, params->key_len, params->key); if (params->seq) NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq); if (params->cipher) NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, params->cipher); key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; if (params->key) NLA_PUT(cookie->msg, NL80211_KEY_DATA, params->key_len, params->key); if (params->seq) NLA_PUT(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq); if (params->cipher) NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, params->cipher); NLA_PUT_U8(cookie->msg, 
NL80211_ATTR_KEY_IDX, cookie->idx); nla_nest_end(cookie->msg, key); return; nla_put_failure: cookie->error = 1; } static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 key_idx = 0; u8 *mac_addr = NULL; struct get_key_cookie cookie = { .error = 0, }; void *hdr; struct sk_buff *msg; if (info->attrs[NL80211_ATTR_KEY_IDX]) key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (key_idx > 5) return -EINVAL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (!rdev->ops->get_key) { err = -EOPNOTSUPP; goto out; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, NL80211_CMD_NEW_KEY); if (IS_ERR(hdr)) { err = PTR_ERR(hdr); goto free_msg; } cookie.msg = msg; cookie.idx = key_idx; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); if (mac_addr) NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, mac_addr, &cookie, get_key_callback); if (err) goto free_msg; if (cookie.error) goto nla_put_failure; genlmsg_end(msg, hdr); err = genlmsg_reply(msg, info); goto out; nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct key_parse key; int err; struct net_device *dev; int (*func)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index); err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx < 0) return -EINVAL; /* only support setting default key */ if (!key.def && !key.defmgmt) return -EINVAL; rtnl_lock(); err = 
get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (key.def) func = rdev->ops->set_default_key; else func = rdev->ops->set_default_mgmt_key; if (!func) { err = -EOPNOTSUPP; goto out; } wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); if (!err) err = func(&rdev->wiphy, dev, key.idx); #ifdef CONFIG_WIRELESS_EXT if (!err) { if (func == rdev->ops->set_default_key) dev->ieee80211_ptr->wext.default_key = key.idx; else dev->ieee80211_ptr->wext.default_mgmt_key = key.idx; } #endif wdev_unlock(dev->ieee80211_ptr); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct key_parse key; u8 *mac_addr = NULL; err = nl80211_parse_key(info, &key); if (err) return err; if (!key.p.key) return -EINVAL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (!rdev->ops->add_key) { err = -EOPNOTSUPP; goto out; } if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, mac_addr)) { err = -EINVAL; goto out; } wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); if (!err) err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx, mac_addr, &key.p); wdev_unlock(dev->ieee80211_ptr); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 *mac_addr = NULL; struct key_parse key; err = nl80211_parse_key(info, &key); if (err) return err; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if 
(!rdev->ops->del_key) { err = -EOPNOTSUPP; goto out; } wdev_lock(dev->ieee80211_ptr); err = nl80211_key_allowed(dev->ieee80211_ptr); if (!err) err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr); #ifdef CONFIG_WIRELESS_EXT if (!err) { if (key.idx == dev->ieee80211_ptr->wext.default_key) dev->ieee80211_ptr->wext.default_key = -1; else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key) dev->ieee80211_ptr->wext.default_mgmt_key = -1; } #endif wdev_unlock(dev->ieee80211_ptr); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) { int (*call)(struct wiphy *wiphy, struct net_device *dev, struct beacon_parameters *info); struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct beacon_parameters params; int haveinfo = 0; if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL])) return -EINVAL; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { err = -EOPNOTSUPP; goto out; } switch (info->genlhdr->cmd) { case NL80211_CMD_NEW_BEACON: /* these are required for NEW_BEACON */ if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || !info->attrs[NL80211_ATTR_BEACON_HEAD]) { err = -EINVAL; goto out; } call = rdev->ops->add_beacon; break; case NL80211_CMD_SET_BEACON: call = rdev->ops->set_beacon; break; default: WARN_ON(1); err = -EOPNOTSUPP; goto out; } if (!call) { err = -EOPNOTSUPP; goto out; } memset(&params, 0, sizeof(params)); if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) { params.interval = nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]); haveinfo = 1; } if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) { params.dtim_period = nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]); haveinfo = 1; } if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { params.head = 
nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); params.head_len = nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]); haveinfo = 1; } if (info->attrs[NL80211_ATTR_BEACON_TAIL]) { params.tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]); params.tail_len = nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]); haveinfo = 1; } if (!haveinfo) { err = -EINVAL; goto out; } err = call(&rdev->wiphy, dev, &params); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (!rdev->ops->del_beacon) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->del_beacon(&rdev->wiphy, dev); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = { [NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, }; static int parse_station_flags(struct genl_info *info, struct station_parameters *params) { struct nlattr *flags[NL80211_STA_FLAG_MAX + 1]; struct nlattr *nla; int flag; /* * Try parsing the new attribute first so userspace * can specify both for older kernels. 
*/ nla = info->attrs[NL80211_ATTR_STA_FLAGS2]; if (nla) { struct nl80211_sta_flag_update *sta_flags; sta_flags = nla_data(nla); params->sta_flags_mask = sta_flags->mask; params->sta_flags_set = sta_flags->set; if ((params->sta_flags_mask | params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID)) return -EINVAL; return 0; } /* if present, parse the old attribute */ nla = info->attrs[NL80211_ATTR_STA_FLAGS]; if (!nla) return 0; if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX, nla, sta_flags_policy)) return -EINVAL; params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1; params->sta_flags_mask &= ~1; for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++) if (flags[flag]) params->sta_flags_set |= (1<<flag); return 0; } static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct net_device *dev, u8 *mac_addr, struct station_info *sinfo) { void *hdr; struct nlattr *sinfoattr, *txrate; u16 bitrate; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); if (!sinfoattr) goto nla_put_failure; if (sinfo->filled & STATION_INFO_INACTIVE_TIME) NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, sinfo->inactive_time); if (sinfo->filled & STATION_INFO_RX_BYTES) NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, sinfo->rx_bytes); if (sinfo->filled & STATION_INFO_TX_BYTES) NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, sinfo->tx_bytes); if (sinfo->filled & STATION_INFO_LLID) NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, sinfo->llid); if (sinfo->filled & STATION_INFO_PLID) NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, sinfo->plid); if (sinfo->filled & STATION_INFO_PLINK_STATE) NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, sinfo->plink_state); if (sinfo->filled & STATION_INFO_SIGNAL) NLA_PUT_U8(msg, 
NL80211_STA_INFO_SIGNAL, sinfo->signal); if (sinfo->filled & STATION_INFO_TX_BITRATE) { txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE); if (!txrate) goto nla_put_failure; /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ bitrate = cfg80211_calculate_bitrate(&sinfo->txrate); if (bitrate > 0) NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); if (sinfo->txrate.flags & RATE_INFO_FLAGS_MCS) NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, sinfo->txrate.mcs); if (sinfo->txrate.flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); if (sinfo->txrate.flags & RATE_INFO_FLAGS_SHORT_GI) NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); nla_nest_end(msg, txrate); } if (sinfo->filled & STATION_INFO_RX_PACKETS) NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, sinfo->rx_packets); if (sinfo->filled & STATION_INFO_TX_PACKETS) NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS, sinfo->tx_packets); nla_nest_end(msg, sinfoattr); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_station(struct sk_buff *skb, struct netlink_callback *cb) { struct station_info sinfo; struct cfg80211_registered_device *dev; struct net_device *netdev; u8 mac_addr[ETH_ALEN]; int ifidx = cb->args[0]; int sta_idx = cb->args[1]; int err; if (!ifidx) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) return err; if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) return -EINVAL; ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); if (!ifidx) return -EINVAL; } rtnl_lock(); netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) { err = -ENODEV; goto out_rtnl; } dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out_rtnl; } if (!dev->ops->dump_station) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = dev->ops->dump_station(&dev->wiphy, netdev, 
sta_idx, mac_addr, &sinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_station(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, netdev, mac_addr, &sinfo) < 0) goto out; sta_idx++; } out: cb->args[1] = sta_idx; err = skb->len; out_err: cfg80211_unlock_rdev(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct station_info sinfo; struct sk_buff *msg; u8 *mac_addr = NULL; memset(&sinfo, 0, sizeof(sinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->get_station) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo); if (err) goto out; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out; if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0, dev, mac_addr, &sinfo) < 0) goto out_free; err = genlmsg_reply(msg, info); goto out; out_free: nlmsg_free(msg); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } /* * Get vlan interface making sure it is on the right wiphy. 
*/ static int get_vlan(struct genl_info *info, struct cfg80211_registered_device *rdev, struct net_device **vlan) { struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; *vlan = NULL; if (vlanattr) { *vlan = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr)); if (!*vlan) return -ENODEV; if (!(*vlan)->ieee80211_ptr) return -EINVAL; if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) return -EINVAL; } return 0; } static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct station_parameters params; u8 *mac_addr = NULL; memset(&params, 0, sizeof(params)); params.listen_interval = -1; if (info->attrs[NL80211_ATTR_STA_AID]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) { params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); } if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (parse_station_flags(info, &params)) return -EINVAL; if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) params.plink_action = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; err = get_vlan(info, rdev, &params.vlan); if (err) goto out; /* validate settings */ err = 0; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* disallow mesh-specific things */ if (params.plink_action) err = -EINVAL; break; case NL80211_IFTYPE_STATION: /* disallow everything but AUTHORIZED flag */ if (params.plink_action) err = -EINVAL; if 
(params.vlan) err = -EINVAL; if (params.supported_rates) err = -EINVAL; if (params.ht_capa) err = -EINVAL; if (params.listen_interval >= 0) err = -EINVAL; if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: /* disallow things mesh doesn't support */ if (params.vlan) err = -EINVAL; if (params.ht_capa) err = -EINVAL; if (params.listen_interval >= 0) err = -EINVAL; if (params.supported_rates) err = -EINVAL; if (params.sta_flags_mask) err = -EINVAL; break; default: err = -EINVAL; } if (err) goto out; if (!rdev->ops->change_station) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params); out: if (params.vlan) dev_put(params.vlan); cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct station_parameters params; u8 *mac_addr = NULL; memset(&params, 0, sizeof(params)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) return -EINVAL; if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL; mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); params.supported_rates = nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.supported_rates_len = nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]); params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); if (info->attrs[NL80211_ATTR_STA_AID]) { params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); if (!params.aid || params.aid > IEEE80211_MAX_AID) return -EINVAL; } if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); if (parse_station_flags(info, &params)) return -EINVAL; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; err 
= get_vlan(info, rdev, &params.vlan); if (err) goto out; /* validate settings */ err = 0; switch (dev->ieee80211_ptr->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: /* all ok but must have AID */ if (!params.aid) err = -EINVAL; break; case NL80211_IFTYPE_MESH_POINT: /* disallow things mesh doesn't support */ if (params.vlan) err = -EINVAL; if (params.aid) err = -EINVAL; if (params.ht_capa) err = -EINVAL; if (params.listen_interval >= 0) err = -EINVAL; if (params.supported_rates) err = -EINVAL; if (params.sta_flags_mask) err = -EINVAL; break; default: err = -EINVAL; } if (err) goto out; if (!rdev->ops->add_station) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params); out: if (params.vlan) dev_put(params.vlan); cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 *mac_addr = NULL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EINVAL; goto out; } if (!rdev->ops->del_station) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->del_station(&rdev->wiphy, dev, mac_addr); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { void *hdr; struct nlattr *pinfoattr; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, 
dev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); if (!pinfoattr) goto nla_put_failure; if (pinfo->filled & MPATH_INFO_FRAME_QLEN) NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, pinfo->frame_qlen); if (pinfo->filled & MPATH_INFO_DSN) NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN, pinfo->dsn); if (pinfo->filled & MPATH_INFO_METRIC) NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, pinfo->metric); if (pinfo->filled & MPATH_INFO_EXPTIME) NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, pinfo->exptime); if (pinfo->filled & MPATH_INFO_FLAGS) NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, pinfo->flags); if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, pinfo->discovery_timeout); if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, pinfo->discovery_retries); nla_nest_end(msg, pinfoattr); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_mpath(struct sk_buff *skb, struct netlink_callback *cb) { struct mpath_info pinfo; struct cfg80211_registered_device *dev; struct net_device *netdev; u8 dst[ETH_ALEN]; u8 next_hop[ETH_ALEN]; int ifidx = cb->args[0]; int path_idx = cb->args[1]; int err; if (!ifidx) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) return err; if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) return -EINVAL; ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); if (!ifidx) return -EINVAL; } rtnl_lock(); netdev = __dev_get_by_index(sock_net(skb->sk), ifidx); if (!netdev) { err = -ENODEV; goto out_rtnl; } dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out_rtnl; } if 
(!dev->ops->dump_mpath) { err = -EOPNOTSUPP; goto out_err; } if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out_err; } while (1) { err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx, dst, next_hop, &pinfo); if (err == -ENOENT) break; if (err) goto out_err; if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, netdev, dst, next_hop, &pinfo) < 0) goto out; path_idx++; } out: cb->args[1] = path_idx; err = skb->len; out_err: cfg80211_unlock_rdev(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; struct mpath_info pinfo; struct sk_buff *msg; u8 *dst = NULL; u8 next_hop[ETH_ALEN]; memset(&pinfo, 0, sizeof(pinfo)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->get_mpath) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo); if (err) goto out; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out; if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0, dev, dst, next_hop, &pinfo) < 0) goto out_free; err = genlmsg_reply(msg, info); goto out; out_free: nlmsg_free(msg); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = 
nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->change_mpath) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } err = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 *dst = NULL; u8 *next_hop = NULL; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]) return -EINVAL; dst = nla_data(info->attrs[NL80211_ATTR_MAC]); next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->add_mpath) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } err = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; int err; struct net_device *dev; u8 *dst = NULL; if (info->attrs[NL80211_ATTR_MAC]) dst = nla_data(info->attrs[NL80211_ATTR_MAC]); rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->del_mpath) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->del_mpath(&rdev->wiphy, dev, dst); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info) { struct 
cfg80211_registered_device *rdev; int err; struct net_device *dev; struct bss_parameters params; memset(&params, 0, sizeof(params)); /* default to not changing parameters */ params.use_cts_prot = -1; params.use_short_preamble = -1; params.use_short_slot_time = -1; if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) params.use_cts_prot = nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]); if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) params.use_short_preamble = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]); if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) params.use_short_slot_time = nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]); if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { params.basic_rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); params.basic_rates_len = nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); } rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto out_rtnl; if (!rdev->ops->change_bss) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->change_bss(&rdev->wiphy, dev, &params); out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = { [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 }, [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 }, [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 }, }; static int parse_reg_rule(struct nlattr *tb[], struct ieee80211_reg_rule *reg_rule) { struct ieee80211_freq_range *freq_range = &reg_rule->freq_range; struct ieee80211_power_rule *power_rule = &reg_rule->power_rule; if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) return -EINVAL; if (!tb[NL80211_ATTR_FREQ_RANGE_START]) return -EINVAL; if 
(!tb[NL80211_ATTR_FREQ_RANGE_END])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
		return -EINVAL;
	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
		return -EINVAL;

	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);

	freq_range->start_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
	freq_range->end_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
	freq_range->max_bandwidth_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);

	power_rule->max_eirp =
		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);

	/* antenna gain is the only optional field */
	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
		power_rule->max_antenna_gain =
			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);

	return 0;
}

/*
 * NL80211_CMD_REQ_SET_REG handler: userspace regulatory hint carrying an
 * ISO-3166 alpha2 country code; forwarded to regulatory_hint_user().
 */
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
	int r;
	char *data = NULL;

	/*
	 * You should only get this when cfg80211 hasn't yet initialized
	 * completely when built-in to the kernel right between the time
	 * window between nl80211_init() and regulatory_init(), if that is
	 * even possible.
	 */
	mutex_lock(&cfg80211_mutex);
	if (unlikely(!cfg80211_regdomain)) {
		mutex_unlock(&cfg80211_mutex);
		return -EINPROGRESS;
	}
	mutex_unlock(&cfg80211_mutex);

	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;

	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);

#ifdef CONFIG_WIRELESS_OLD_REGULATORY
	/* We ignore world regdom requests with the old regdom setup */
	if (is_world_regdom(data))
		return -EINVAL;
#endif

	r = regulatory_hint_user(data);

	return r;
}

/*
 * NL80211_CMD_GET_MESH_PARAMS handler: query the driver's mesh
 * configuration and reply with a nested NL80211_ATTR_MESH_PARAMS
 * attribute.  Reply construction continues in the next chunk.
 */
static int nl80211_get_mesh_params(struct sk_buff *skb,
	struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct mesh_config cur_params;
	int err;
	struct net_device *dev;
	void *hdr;
	struct nlattr *pinfoattr;
	struct sk_buff *msg;

	rtnl_lock();

	/* Look up our device */
	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto out_rtnl;

	if (!rdev->ops->get_mesh_params) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Get the mesh params */
	err = rdev->ops->get_mesh_params(&rdev->wiphy, dev, &cur_params);
	if (err)
		goto
out;

	/* Draw up a netlink message to send back */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOBUFS;
		goto out;
	}
	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_MESH_PARAMS);
	if (!hdr)
		goto nla_put_failure;
	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_PARAMS);
	if (!pinfoattr)
		goto nla_put_failure;
	/* the NLA_PUT_*() macros jump to nla_put_failure if the msg fills */
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
	NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
			cur_params.dot11MeshRetryTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
			cur_params.dot11MeshConfirmTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
			cur_params.dot11MeshHoldingTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
			cur_params.dot11MeshMaxPeerLinks);
	NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
			cur_params.dot11MeshMaxRetries);
	NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
			cur_params.dot11MeshTTL);
	NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
			cur_params.auto_open_plinks);
	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
			cur_params.dot11MeshHWMPmaxPREQretries);
	NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
			cur_params.path_refresh_time);
	NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
			cur_params.min_discovery_timeout);
	NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
			cur_params.dot11MeshHWMPactivePathTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
			cur_params.dot11MeshHWMPpreqMinInterval);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
	nla_nest_end(msg, pinfoattr);
	genlmsg_end(msg, hdr);
	err = genlmsg_reply(msg, info);
	goto out;

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	err = -EMSGSIZE;
 out:
	/* Cleanup */
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 out_rtnl:
	rtnl_unlock();

	return err;
}

/*
 * Copy one attribute into @cfg when present and record it in the change
 * bitmask; the mask bit index is (attribute number - 1).
 * Continues on the next chunk (this physical line was split mid-macro).
 */
#define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \
do {\
	if (table[attr_num]) {\
		cfg.param = nla_fn(table[attr_num]); \
		mask |= (1 << (attr_num - \
1)); \
	} \
} while (0);\

/* Policy for the attributes nested inside NL80211_ATTR_MESH_PARAMS. */
static struct nla_policy
nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
	[NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 },
	[NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 },
	[NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
	[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
	[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
	[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
	[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
	[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
};

/*
 * NL80211_CMD_SET_MESH_PARAMS handler: parse the nested mesh config
 * attributes and pass the changed fields (plus a change bitmask) to the
 * driver.  Continues in the next chunk.
 */
static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	u32 mask;
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct mesh_config cfg;
	struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
	struct nlattr *parent_attr;

	parent_attr = info->attrs[NL80211_ATTR_MESH_PARAMS];
	if (!parent_attr)
		return -EINVAL;
	if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX,
			parent_attr, nl80211_meshconf_params_policy))
		return -EINVAL;

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto out_rtnl;

	if (!rdev->ops->set_mesh_params) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* This makes sure that there aren't more than 32 mesh config
	 * parameters (otherwise our bitfield scheme would not work.)
	 */
	BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);

	/* Fill in the params struct */
	mask = 0;
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
			mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
			mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
			mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
			mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
			mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
			mask, NL80211_MESHCONF_TTL, nla_get_u8);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
			mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
			mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
			nla_get_u8);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
			mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
			mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
			nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
			mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
			nla_get_u32);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
			mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
			nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
			dot11MeshHWMPnetDiameterTraversalTime,
			mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
			nla_get_u16);

	/* Apply changes */
	err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask);

 out:
	/* cleanup */
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 out_rtnl:
	rtnl_unlock();

	return err;
}

#undef FILL_IN_MESH_PARAM_IF_SET

/*
 * NL80211_CMD_GET_REG handler: dump the current regulatory domain as a
 * nested list of rules.  Continues in the next chunk.
 */
static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr = NULL;
	struct nlattr *nl_reg_rules;
	unsigned int i;
	int err = -EINVAL;
mutex_lock(&cfg80211_mutex);
	/* cfg80211_regdomain is protected by cfg80211_mutex */

	if (!cfg80211_regdomain)
		goto out;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOBUFS;
		goto out;
	}

	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_REG);
	if (!hdr)
		goto nla_put_failure;

	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
		cfg80211_regdomain->alpha2);

	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
	if (!nl_reg_rules)
		goto nla_put_failure;

	for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
		struct nlattr *nl_reg_rule;
		const struct ieee80211_reg_rule *reg_rule;
		const struct ieee80211_freq_range *freq_range;
		const struct ieee80211_power_rule *power_rule;

		reg_rule = &cfg80211_regdomain->reg_rules[i];
		freq_range = &reg_rule->freq_range;
		power_rule = &reg_rule->power_rule;

		nl_reg_rule = nla_nest_start(msg, i);
		if (!nl_reg_rule)
			goto nla_put_failure;

		NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS,
			reg_rule->flags);
		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START,
			freq_range->start_freq_khz);
		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END,
			freq_range->end_freq_khz);
		NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
			freq_range->max_bandwidth_khz);
		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
			power_rule->max_antenna_gain);
		NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
			power_rule->max_eirp);

		nla_nest_end(msg, nl_reg_rule);
	}

	nla_nest_end(msg, nl_reg_rules);

	genlmsg_end(msg, hdr);
	err = genlmsg_reply(msg, info);
	goto out;

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	err = -EMSGSIZE;
 out:
	mutex_unlock(&cfg80211_mutex);
	return err;
}

/*
 * NL80211_CMD_SET_REG handler: install a complete regulatory domain
 * supplied by userspace.  Continues in the next chunk.
 */
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
	struct nlattr *nl_reg_rule;
	char *alpha2 = NULL;
	int rem_reg_rules = 0, r = 0;
	u32 num_rules = 0, rule_idx = 0, size_of_regd;
	struct ieee80211_regdomain *rd = NULL;

	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REG_RULES])
		return -EINVAL;

	alpha2 =
nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);

	/* first pass: count and bound the number of rules */
	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
			rem_reg_rules) {
		num_rules++;
		if (num_rules > NL80211_MAX_SUPP_REG_RULES)
			return -EINVAL;
	}

	mutex_lock(&cfg80211_mutex);

	if (!reg_is_valid_request(alpha2)) {
		r = -EINVAL;
		goto bad_reg;
	}

	size_of_regd = sizeof(struct ieee80211_regdomain) +
		(num_rules * sizeof(struct ieee80211_reg_rule));

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd) {
		r = -ENOMEM;
		goto bad_reg;
	}

	rd->n_reg_rules = num_rules;
	rd->alpha2[0] = alpha2[0];
	rd->alpha2[1] = alpha2[1];

	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
			rem_reg_rules) {
		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
			nla_data(nl_reg_rule), nla_len(nl_reg_rule),
			reg_rule_policy);
		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
		if (r)
			goto bad_reg;

		rule_idx++;

		if (rule_idx > NL80211_MAX_SUPP_REG_RULES) {
			r = -EINVAL;
			goto bad_reg;
		}
	}

	BUG_ON(rule_idx != num_rules);

	/* set_regdom() takes ownership of rd on success */
	r = set_regdom(rd);

	mutex_unlock(&cfg80211_mutex);

	return r;

 bad_reg:
	/* rd may still be NULL here; kfree(NULL) is a no-op */
	mutex_unlock(&cfg80211_mutex);
	kfree(rd);
	return r;
}

/*
 * Count the frequencies in a scan request's frequency list.  Returns 0
 * (treated as invalid by the caller) when any frequency appears twice,
 * otherwise the number of channels.
 */
static int validate_scan_freqs(struct nlattr *freqs)
{
	struct nlattr *attr1, *attr2;
	int n_channels = 0, tmp1, tmp2;

	nla_for_each_nested(attr1, freqs, tmp1) {
		n_channels++;
		/*
		 * Some hardware has a limited channel list for
		 * scanning, and it is pretty much nonsensical
		 * to scan for a channel twice, so disallow that
		 * and don't require drivers to check that the
		 * channel list they get isn't longer than what
		 * they can scan, as long as they can scan all
		 * the channels they registered at once.
		 */
		nla_for_each_nested(attr2, freqs, tmp2)
			if (attr1 != attr2 &&
			    nla_get_u32(attr1) == nla_get_u32(attr2))
				return 0;
	}

	return n_channels;
}

/*
 * NL80211_CMD_TRIGGER_SCAN handler: build a cfg80211_scan_request
 * (channels, SSIDs and extra IEs are carved out of one allocation) and
 * hand it to the driver.  Continues in the next chunk.
 */
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct cfg80211_scan_request *request;
	struct cfg80211_ssid *ssid;
	struct ieee80211_channel *channel;
	struct nlattr *attr;
	struct wiphy *wiphy;
	int err, tmp, n_ssids = 0, n_channels, i;
	enum ieee80211_band band;
	size_t ie_len;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto out_rtnl;

	wiphy = &rdev->wiphy;

	if (!rdev->ops->scan) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	/* only one scan may be pending per device */
	if (rdev->scan_req) {
		err = -EBUSY;
		goto out;
	}

	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
		n_channels = validate_scan_freqs(
				info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
		if (!n_channels) {
			err = -EINVAL;
			goto out;
		}
	} else {
		/* no frequency list: scan everything the wiphy registered */
		n_channels = 0;
		for (band = 0; band < IEEE80211_NUM_BANDS; band++)
			if (wiphy->bands[band])
				n_channels += wiphy->bands[band]->n_channels;
	}

	if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
				    tmp)
			n_ssids++;

	if (n_ssids > wiphy->max_scan_ssids) {
		err = -EINVAL;
		goto out;
	}

	if (info->attrs[NL80211_ATTR_IE])
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	else
		ie_len = 0;

	if (ie_len > wiphy->max_scan_ie_len) {
		err = -EINVAL;
		goto out;
	}

	/* single allocation: request + SSID array + channel ptrs + IEs */
	request = kzalloc(sizeof(*request)
			+ sizeof(*ssid) * n_ssids
			+ sizeof(channel) * n_channels
			+ ie_len, GFP_KERNEL);
	if (!request) {
		err = -ENOMEM;
		goto out;
	}

	request->n_channels = n_channels;
	if (n_ssids)
		request->ssids = (void *)&request->channels[n_channels];
	request->n_ssids = n_ssids;
	if (ie_len) {
		if (request->ssids)
			request->ie = (void *)(request->ssids + n_ssids);
		else
			request->ie = (void *)(request->channels + n_channels);
	}

	if
(info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { /* user specified, bail out if channel not found */ request->n_channels = n_channels; i = 0; nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) { request->channels[i] = ieee80211_get_channel(wiphy, nla_get_u32(attr)); if (!request->channels[i]) { err = -EINVAL; goto out_free; } i++; } } else { /* all channels */ i = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { request->channels[i] = &wiphy->bands[band]->channels[j]; i++; } } } i = 0; if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) { err = -EINVAL; goto out_free; } memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); request->ssids[i].ssid_len = nla_len(attr); i++; } } if (info->attrs[NL80211_ATTR_IE]) { request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); memcpy((void *)request->ie, nla_data(info->attrs[NL80211_ATTR_IE]), request->ie_len); } request->dev = dev; request->wiphy = &rdev->wiphy; rdev->scan_req = request; err = rdev->ops->scan(&rdev->wiphy, dev, request); if (!err) { nl80211_send_scan_start(rdev, dev); dev_hold(dev); } out_free: if (err) { rdev->scan_req = NULL; kfree(request); } out: cfg80211_unlock_rdev(rdev); dev_put(dev); out_rtnl: rtnl_unlock(); return err; } static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_internal_bss *intbss) { struct cfg80211_bss *res = &intbss->pub; void *hdr; struct nlattr *bss; int i; ASSERT_WDEV_LOCK(wdev); hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_SCAN_RESULTS); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); bss = nla_nest_start(msg, 
NL80211_ATTR_BSS);
	if (!bss)
		goto nla_put_failure;
	if (!is_zero_ether_addr(res->bssid))
		NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
	if (res->information_elements && res->len_information_elements)
		NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
			res->len_information_elements,
			res->information_elements);
	if (res->tsf)
		NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
	if (res->beacon_interval)
		NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL,
			res->beacon_interval);
	NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
	NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);

	/* report signal in whichever unit the wiphy registered */
	switch (rdev->wiphy.signal_type) {
	case CFG80211_SIGNAL_TYPE_MBM:
		NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
		break;
	case CFG80211_SIGNAL_TYPE_UNSPEC:
		NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
		break;
	default:
		break;
	}

	/* flag the BSS we are associated with / authenticated to / joined */
	switch (wdev->iftype) {
	case NL80211_IFTYPE_STATION:
		if (intbss == wdev->current_bss)
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_ASSOCIATED);
		else for (i = 0; i < MAX_AUTH_BSSES; i++) {
			if (intbss != wdev->auth_bsses[i])
				continue;
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_AUTHENTICATED);
			break;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (intbss == wdev->current_bss)
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_IBSS_JOINED);
		break;
	default:
		break;
	}

	nla_nest_end(msg, bss);

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/*
 * NL80211_CMD_GET_SCAN dump callback: iterate the rdev's BSS list,
 * resuming at cb->args[1].  Continues in the next chunk.
 */
static int nl80211_dump_scan(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct cfg80211_internal_bss *scan;
	struct wireless_dev *wdev;
	int ifidx = cb->args[0];
	int start = cb->args[1], idx = 0;
	int err;

	if (!ifidx) {
		/* first dump pass: cache the ifindex in cb->args[0] */
		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
				  nl80211_policy);
		if (err)
			return err;

		if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
			return -EINVAL;

		ifidx =
nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
		if (!ifidx)
			return -EINVAL;
		cb->args[0] = ifidx;
	}

	dev = dev_get_by_index(sock_net(skb->sk), ifidx);
	if (!dev)
		return -ENODEV;

	rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
	if (IS_ERR(rdev)) {
		err = PTR_ERR(rdev);
		goto out_put_netdev;
	}

	wdev = dev->ieee80211_ptr;

	wdev_lock(wdev);
	spin_lock_bh(&rdev->bss_lock);
	cfg80211_bss_expire(rdev);
	list_for_each_entry(scan, &rdev->bss_list, list) {
		if (++idx <= start)
			continue;
		if (nl80211_send_bss(skb,
				NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				rdev, wdev, scan) < 0) {
			/* entry didn't fit; resume here on the next pass */
			idx--;
			goto out;
		}
	}

 out:
	spin_unlock_bh(&rdev->bss_lock);
	wdev_unlock(wdev);

	cb->args[1] = idx;
	err = skb->len;
	cfg80211_unlock_rdev(rdev);
 out_put_netdev:
	dev_put(dev);

	return err;
}

/* Validity helpers for userspace-supplied enum/bitmask/suite values. */
static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
{
	return auth_type <= NL80211_AUTHTYPE_MAX;
}

static bool nl80211_valid_wpa_versions(u32 wpa_versions)
{
	return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
				  NL80211_WPA_VERSION_2));
}

static bool nl80211_valid_akm_suite(u32 akm)
{
	return akm == WLAN_AKM_SUITE_8021X ||
		akm == WLAN_AKM_SUITE_PSK;
}

static bool nl80211_valid_cipher_suite(u32 cipher)
{
	return cipher == WLAN_CIPHER_SUITE_WEP40 ||
		cipher == WLAN_CIPHER_SUITE_WEP104 ||
		cipher == WLAN_CIPHER_SUITE_TKIP ||
		cipher == WLAN_CIPHER_SUITE_CCMP ||
		cipher == WLAN_CIPHER_SUITE_AES_CMAC;
}

/*
 * NL80211_CMD_AUTHENTICATE handler: MAC, auth type, SSID and frequency
 * are mandatory; an optional WEP key may be attached.  Continues in the
 * next chunk.
 */
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct ieee80211_channel *chan;
	const u8 *bssid, *ssid, *ie = NULL;
	int err, ssid_len, ie_len = 0;
	enum nl80211_auth_type auth_type;
	struct key_parse key;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_SSID])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
		return -EINVAL;

	err =
nl80211_parse_key(info, &key);
	if (err)
		return err;

	if (key.idx >= 0) {
		/* only a WEP key with matching cipher/length is acceptable */
		if (!key.p.key || !key.p.key_len)
			return -EINVAL;
		if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 ||
		     key.p.key_len != WLAN_KEY_LEN_WEP40) &&
		    (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
		     key.p.key_len != WLAN_KEY_LEN_WEP104))
			return -EINVAL;
		if (key.idx > 4)
			return -EINVAL;
	} else {
		key.p.key_len = 0;
		key.p.key = NULL;
	}

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	if (!rdev->ops->auth) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
	chan = ieee80211_get_channel(&rdev->wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) {
		err = -EINVAL;
		goto out;
	}

	ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
	if (!nl80211_valid_auth_type(auth_type)) {
		err = -EINVAL;
		goto out;
	}

	err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
				 ssid, ssid_len, ie, ie_len,
				 key.p.key, key.p.key_len, key.idx);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}

/*
 * Parse the crypto-related attributes of a connect/associate request
 * into @settings, validating suites and version bits as we go.
 * @cipher_limit bounds the number of pairwise ciphers accepted.
 * Continues in the next chunk.
 */
static int nl80211_crypto_settings(struct genl_info *info,
				   struct cfg80211_crypto_settings *settings,
				   int cipher_limit)
{
	memset(settings, 0, sizeof(*settings));

	/* the mere presence of the attribute enables control-port mode */
	settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];

	if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
		void *data;
		int len, i;

		data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
		len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
		settings->n_ciphers_pairwise = len / sizeof(u32);
		if (len %
sizeof(u32)) return -EINVAL; if (settings->n_ciphers_pairwise > cipher_limit) return -EINVAL; memcpy(settings->ciphers_pairwise, data, len); for (i = 0; i < settings->n_ciphers_pairwise; i++) if (!nl80211_valid_cipher_suite( settings->ciphers_pairwise[i])) return -EINVAL; } if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) { settings->cipher_group = nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]); if (!nl80211_valid_cipher_suite(settings->cipher_group)) return -EINVAL; } if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) { settings->wpa_versions = nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]); if (!nl80211_valid_wpa_versions(settings->wpa_versions)) return -EINVAL; } if (info->attrs[NL80211_ATTR_AKM_SUITES]) { void *data; int len, i; data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]); len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]); settings->n_akm_suites = len / sizeof(u32); if (len % sizeof(u32)) return -EINVAL; memcpy(settings->akm_suites, data, len); for (i = 0; i < settings->n_ciphers_pairwise; i++) if (!nl80211_valid_akm_suite(settings->akm_suites[i])) return -EINVAL; } return 0; } static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *dev; struct cfg80211_crypto_settings crypto; struct ieee80211_channel *chan, *fixedchan; const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; int err, ssid_len, ie_len = 0; bool use_mfp = false; if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; if (!info->attrs[NL80211_ATTR_MAC] || !info->attrs[NL80211_ATTR_SSID] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (!rdev->ops->assoc) { err = -EOPNOTSUPP; goto out; } if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); 
chan = ieee80211_get_channel(&rdev->wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) {
		err = -EINVAL;
		goto out;
	}

	/* refuse if another interface pinned a different channel */
	mutex_lock(&rdev->devlist_mtx);
	fixedchan = rdev_fixed_channel(rdev, NULL);
	if (fixedchan && chan != fixedchan) {
		err = -EBUSY;
		mutex_unlock(&rdev->devlist_mtx);
		goto out;
	}
	mutex_unlock(&rdev->devlist_mtx);

	ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	if (info->attrs[NL80211_ATTR_USE_MFP]) {
		enum nl80211_mfp mfp =
			nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
		if (mfp == NL80211_MFP_REQUIRED)
			use_mfp = true;
		else if (mfp != NL80211_MFP_NO) {
			err = -EINVAL;
			goto out;
		}
	}

	if (info->attrs[NL80211_ATTR_PREV_BSSID])
		prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);

	/* association accepts at most one pairwise cipher suite */
	err = nl80211_crypto_settings(info, &crypto, 1);
	if (!err)
		err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
					  ssid, ssid_len, ie, ie_len, use_mfp,
					  &crypto);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}

/*
 * NL80211_CMD_DEAUTHENTICATE handler: MAC and reason code mandatory.
 * Continues in the next chunk.
 */
static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	const u8 *ie = NULL, *bssid;
	int err, ie_len = 0;
	u16 reason_code;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REASON_CODE])
		return -EINVAL;

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	if (!rdev->ops->deauth) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);

	reason_code =
nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
	if (reason_code == 0) {
		/* Reason Code 0 is reserved */
		err = -EINVAL;
		goto out;
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}

/*
 * NL80211_CMD_DISASSOCIATE handler: same shape as deauthenticate but
 * calls the disassoc MLME primitive.
 */
static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	const u8 *ie = NULL, *bssid;
	int err, ie_len = 0;
	u16 reason_code;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REASON_CODE])
		return -EINVAL;

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	if (!rdev->ops->disassoc) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);

	reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
	if (reason_code == 0) {
		/* Reason Code 0 is reserved */
		err = -EINVAL;
		goto out;
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}

/*
 * NL80211_CMD_JOIN_IBSS handler: frequency and non-empty SSID are
 * mandatory.  Continues in the next chunk.
 */
static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct cfg80211_ibss_params ibss;
	struct wiphy *wiphy;
	struct cfg80211_cached_keys *connkeys = NULL;
	int err;

	memset(&ibss, 0, sizeof(ibss));

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if
(!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
	    !info->attrs[NL80211_ATTR_SSID] ||
	    !nla_len(info->attrs[NL80211_ATTR_SSID]))
		return -EINVAL;

	/* default beacon interval; overridable within [1, 10000] TU */
	ibss.beacon_interval = 100;

	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
		ibss.beacon_interval =
			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
		if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
			return -EINVAL;
	}

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto unlock_rtnl;

	if (!rdev->ops->join_ibss) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	wiphy = &rdev->wiphy;

	if (info->attrs[NL80211_ATTR_MAC])
		ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
	ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	ibss.channel = ieee80211_get_channel(wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!ibss.channel ||
	    ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
	    ibss.channel->flags & IEEE80211_CHAN_DISABLED) {
		err = -EINVAL;
		goto out;
	}

	ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
	ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];

	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
		connkeys = nl80211_parse_connkeys(rdev,
					info->attrs[NL80211_ATTR_KEYS]);
		if (IS_ERR(connkeys)) {
			err = PTR_ERR(connkeys);
			connkeys = NULL;
			goto out;
		}
	}

	/* on success cfg80211_join_ibss() takes ownership of connkeys */
	err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	if (err)
		kfree(connkeys);
	rtnl_unlock();
	return err;
}

/* NL80211_CMD_LEAVE_IBSS handler; continues in the next chunk. */
static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if (err)
		goto
unlock_rtnl;

	if (!rdev->ops->leave_ibss) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	err = cfg80211_leave_ibss(rdev, dev, false);

 out:
	cfg80211_unlock_rdev(rdev);
	dev_put(dev);
 unlock_rtnl:
	rtnl_unlock();
	return err;
}

#ifdef CONFIG_NL80211_TESTMODE
/* multicast group for driver test-mode events */
static struct genl_multicast_group nl80211_testmode_mcgrp = {
	.name = "testmode",
};

/*
 * NL80211_CMD_TESTMODE handler: pass opaque TESTDATA to the driver.
 * rdev->testmode_info is set around the call so the driver's reply
 * helpers below know whom to answer.
 */
static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	int err;

	if (!info->attrs[NL80211_ATTR_TESTDATA])
		return -EINVAL;

	rtnl_lock();
	rdev = cfg80211_get_dev_from_info(info);
	if (IS_ERR(rdev)) {
		err = PTR_ERR(rdev);
		goto unlock_rtnl;
	}

	err = -EOPNOTSUPP;
	if (rdev->ops->testmode_cmd) {
		rdev->testmode_info = info;
		err = rdev->ops->testmode_cmd(&rdev->wiphy,
				nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
				nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
		rdev->testmode_info = NULL;
	}

	cfg80211_unlock_rdev(rdev);

 unlock_rtnl:
	rtnl_unlock();
	return err;
}

/*
 * Allocate an skb pre-filled with a TESTMODE header and an open
 * TESTDATA nest; rdev/hdr/data are stashed in skb->cb for the
 * reply/event finalizers below.
 */
static struct sk_buff *
__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
			      int approxlen, u32 pid, u32 seq, gfp_t gfp)
{
	struct sk_buff *skb;
	void *hdr;
	struct nlattr *data;

	skb = nlmsg_new(approxlen + 100, gfp);
	if (!skb)
		return NULL;

	hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE);
	if (!hdr) {
		kfree_skb(skb);
		return NULL;
	}

	NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
	data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);

	((void **)skb->cb)[0] = rdev;
	((void **)skb->cb)[1] = hdr;
	((void **)skb->cb)[2] = data;

	return skb;

 nla_put_failure:
	kfree_skb(skb);
	return NULL;
}

/* Allocate a reply skb addressed to the currently-running testmode cmd. */
struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
						  int approxlen)
{
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

	if (WARN_ON(!rdev->testmode_info))
		return NULL;

	return __cfg80211_testmode_alloc_skb(rdev, approxlen,
				rdev->testmode_info->snd_pid,
rdev->testmode_info->snd_seq,
				GFP_KERNEL);
}
EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb);

/* Close the TESTDATA nest and send the reply built by the driver. */
int cfg80211_testmode_reply(struct sk_buff *skb)
{
	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
	void *hdr = ((void **)skb->cb)[1];
	struct nlattr *data = ((void **)skb->cb)[2];

	if (WARN_ON(!rdev->testmode_info)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	nla_nest_end(skb, data);
	genlmsg_end(skb, hdr);
	return genlmsg_reply(skb, rdev->testmode_info);
}
EXPORT_SYMBOL(cfg80211_testmode_reply);

struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
						  int approxlen, gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

	return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);

/* Finalize and multicast a testmode event to the "testmode" group. */
void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
	void *hdr = ((void **)skb->cb)[1];
	struct nlattr *data = ((void **)skb->cb)[2];

	nla_nest_end(skb, data);
	genlmsg_end(skb, hdr);
	genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_event);
#endif

/*
 * NL80211_CMD_CONNECT handler: high-level connect (SME in cfg80211 or
 * the driver).  A non-empty SSID is mandatory.  Continues in the next
 * chunk.
 */
static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct cfg80211_connect_params connect;
	struct wiphy *wiphy;
	struct cfg80211_cached_keys *connkeys = NULL;
	int err;

	memset(&connect, 0, sizeof(connect));

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_SSID] ||
	    !nla_len(info->attrs[NL80211_ATTR_SSID]))
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
		connect.auth_type =
			nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
		if (!nl80211_valid_auth_type(connect.auth_type))
			return -EINVAL;
	} else
		connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;

	/* attribute presence (non-NULL pointer) selects privacy */
	connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];

	err = nl80211_crypto_settings(info, &connect.crypto,
				      NL80211_MAX_NR_CIPHER_SUITES);
	if (err)
		return err;
	rtnl_lock();

	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
	if
(err) goto unlock_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } wiphy = &rdev->wiphy; if (info->attrs[NL80211_ATTR_MAC]) connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]); connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); if (info->attrs[NL80211_ATTR_IE]) { connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]); connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { connect.channel = ieee80211_get_channel(wiphy, nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ])); if (!connect.channel || connect.channel->flags & IEEE80211_CHAN_DISABLED) { err = -EINVAL; goto out; } } if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { connkeys = nl80211_parse_connkeys(rdev, info->attrs[NL80211_ATTR_KEYS]); if (IS_ERR(connkeys)) { err = PTR_ERR(connkeys); connkeys = NULL; goto out; } } err = cfg80211_connect(rdev, dev, &connect, connkeys); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: if (err) kfree(connkeys); rtnl_unlock(); return err; } static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *dev; int err; u16 reason; if (!info->attrs[NL80211_ATTR_REASON_CODE]) reason = WLAN_REASON_DEAUTH_LEAVING; else reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]); if (reason == 0) return -EINVAL; rtnl_lock(); err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) goto unlock_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { err = -EOPNOTSUPP; goto out; } if (!netif_running(dev)) { err = -ENETDOWN; goto out; } err = cfg80211_disconnect(rdev, dev, reason, true); out: cfg80211_unlock_rdev(rdev); dev_put(dev); unlock_rtnl: rtnl_unlock(); return err; } static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) { struct 
cfg80211_registered_device *rdev; struct net *net; int err; u32 pid; if (!info->attrs[NL80211_ATTR_PID]) return -EINVAL; pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); rtnl_lock(); rdev = cfg80211_get_dev_from_info(info); if (IS_ERR(rdev)) { err = PTR_ERR(rdev); goto out_rtnl; } net = get_net_ns_by_pid(pid); if (IS_ERR(net)) { err = PTR_ERR(net); goto out; } err = 0; /* check if anything to do */ if (net_eq(wiphy_net(&rdev->wiphy), net)) goto out_put_net; err = cfg80211_switch_netns(rdev, net); out_put_net: put_net(net); out: cfg80211_unlock_rdev(rdev); out_rtnl: rtnl_unlock(); return err; } static struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, .doit = nl80211_get_wiphy, .dumpit = nl80211_dump_wiphy, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ }, { .cmd = NL80211_CMD_SET_WIPHY, .doit = nl80211_set_wiphy, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_INTERFACE, .doit = nl80211_get_interface, .dumpit = nl80211_dump_interface, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ }, { .cmd = NL80211_CMD_SET_INTERFACE, .doit = nl80211_set_interface, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_NEW_INTERFACE, .doit = nl80211_new_interface, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DEL_INTERFACE, .doit = nl80211_del_interface, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_KEY, .doit = nl80211_get_key, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_SET_KEY, .doit = nl80211_set_key, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_NEW_KEY, .doit = nl80211_new_key, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DEL_KEY, .doit = nl80211_del_key, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_SET_BEACON, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = 
nl80211_addset_beacon, }, { .cmd = NL80211_CMD_NEW_BEACON, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = nl80211_addset_beacon, }, { .cmd = NL80211_CMD_DEL_BEACON, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, .doit = nl80211_del_beacon, }, { .cmd = NL80211_CMD_GET_STATION, .doit = nl80211_get_station, .dumpit = nl80211_dump_station, .policy = nl80211_policy, }, { .cmd = NL80211_CMD_SET_STATION, .doit = nl80211_set_station, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_NEW_STATION, .doit = nl80211_new_station, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DEL_STATION, .doit = nl80211_del_station, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_MPATH, .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_SET_MPATH, .doit = nl80211_set_mpath, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_NEW_MPATH, .doit = nl80211_new_mpath, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DEL_MPATH, .doit = nl80211_del_mpath, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_SET_BSS, .doit = nl80211_set_bss, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_REG, .doit = nl80211_get_reg, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ }, { .cmd = NL80211_CMD_SET_REG, .doit = nl80211_set_reg, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_REQ_SET_REG, .doit = nl80211_req_set_reg, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_MESH_PARAMS, .doit = nl80211_get_mesh_params, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ }, { .cmd = NL80211_CMD_SET_MESH_PARAMS, .doit = nl80211_set_mesh_params, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_TRIGGER_SCAN, 
.doit = nl80211_trigger_scan, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_GET_SCAN, .policy = nl80211_policy, .dumpit = nl80211_dump_scan, }, { .cmd = NL80211_CMD_AUTHENTICATE, .doit = nl80211_authenticate, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_ASSOCIATE, .doit = nl80211_associate, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DEAUTHENTICATE, .doit = nl80211_deauthenticate, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DISASSOCIATE, .doit = nl80211_disassociate, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_JOIN_IBSS, .doit = nl80211_join_ibss, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_LEAVE_IBSS, .doit = nl80211_leave_ibss, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, #ifdef CONFIG_NL80211_TESTMODE { .cmd = NL80211_CMD_TESTMODE, .doit = nl80211_testmode_do, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, #endif { .cmd = NL80211_CMD_CONNECT, .doit = nl80211_connect, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_DISCONNECT, .doit = nl80211_disconnect, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = NL80211_CMD_SET_WIPHY_NETNS, .doit = nl80211_wiphy_netns, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, }, }; static struct genl_multicast_group nl80211_mlme_mcgrp = { .name = "mlme", }; /* multicast groups */ static struct genl_multicast_group nl80211_config_mcgrp = { .name = "config", }; static struct genl_multicast_group nl80211_scan_mcgrp = { .name = "scan", }; static struct genl_multicast_group nl80211_regulatory_mcgrp = { .name = "regulatory", }; /* notification functions */ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) { nlmsg_free(msg); return; 
} genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); } static int nl80211_add_scan_req(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { struct cfg80211_scan_request *req = rdev->scan_req; struct nlattr *nest; int i; ASSERT_RDEV_LOCK(rdev); if (WARN_ON(!req)) return 0; nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_ssids; i++) NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); nla_nest_end(msg, nest); nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); if (!nest) goto nla_put_failure; for (i = 0; i < req->n_channels; i++) NLA_PUT_U32(msg, i, req->channels[i]->center_freq); nla_nest_end(msg, nest); if (req->ie) NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_send_scan_msg(struct sk_buff *msg, struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 pid, u32 seq, int flags, u32 cmd) { void *hdr; hdr = nl80211hdr_put(msg, pid, seq, flags, cmd); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, struct net_device *netdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, NL80211_CMD_TRIGGER_SCAN) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL); } void nl80211_send_scan_done(struct cfg80211_registered_device *rdev, struct net_device *netdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_scan_msg(msg, 
rdev, netdev, 0, 0, 0, NL80211_CMD_NEW_SCAN_RESULTS) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL); } void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, struct net_device *netdev) { struct sk_buff *msg; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0, NL80211_CMD_SCAN_ABORTED) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL); } /* * This can happen on global regulatory changes or device specific settings * based on custom world regulatory domains. */ void nl80211_send_reg_change_event(struct regulatory_request *request) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_CHANGE); if (!hdr) { nlmsg_free(msg); return; } /* Userspace can always count this one always being set */ NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); if (request->alpha2[0] == '0' && request->alpha2[1] == '0') NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_WORLD); else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_CUSTOM_WORLD); else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || request->intersect) NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_INTERSECTION); else { NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, NL80211_REGDOM_TYPE_COUNTRY); NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); } if (wiphy_idx_valid(request->wiphy_idx)) NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: genlmsg_cancel(msg, hdr); 
nlmsg_free(msg); } static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, enum nl80211_commands cmd, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_AUTHENTICATE, gfp); } void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE, gfp); } void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DEAUTHENTICATE, gfp); } void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_DISASSOCIATE, gfp); } static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, int cmd, const u8 *addr, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, 
NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE, addr, gfp); } void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp) { nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr, gfp); } void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_GOODSIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); if (bssid) NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); if (req_ie) NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_roamed(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = 
nlmsg_new(NLMSG_GOODSIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); if (req_ie) NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, struct net_device *netdev, u16 reason, const u8 *ie, size_t ie_len, bool from_ap) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); if (from_ap && reason) NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); if (from_ap) NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); if (ie) NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, 
ETH_ALEN, bssid); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc, gfp_t gfp) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE); if (!hdr) { nlmsg_free(msg); return; } NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); if (addr) NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); if (tsc) NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } void nl80211_send_beacon_hint_event(struct wiphy *wiphy, struct ieee80211_channel *channel_before, struct ieee80211_channel *channel_after) { struct sk_buff *msg; void *hdr; struct nlattr *nl_freq; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT); if (!hdr) { nlmsg_free(msg); return; } /* * Since we are applying the beacon hint to a wiphy we know its * wiphy_idx is valid */ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); /* Before */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); if (!nl_freq) goto nla_put_failure; if (nl80211_msg_put_channel(msg, channel_before)) goto nla_put_failure; nla_nest_end(msg, nl_freq); /* After */ nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); if (!nl_freq) goto nla_put_failure; if 
(nl80211_msg_put_channel(msg, channel_after)) goto nla_put_failure; nla_nest_end(msg, nl_freq); if (genlmsg_end(msg, hdr) < 0) { nlmsg_free(msg); return; } rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC); rcu_read_unlock(); return; nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); } /* initialisation/exit functions */ int nl80211_init(void) { int err; err = genl_register_family_with_ops(&nl80211_fam, nl80211_ops, ARRAY_SIZE(nl80211_ops)); if (err) return err; err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); if (err) goto err_out; err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp); if (err) goto err_out; err = genl_register_mc_group(&nl80211_fam, &nl80211_regulatory_mcgrp); if (err) goto err_out; err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp); if (err) goto err_out; #ifdef CONFIG_NL80211_TESTMODE err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp); if (err) goto err_out; #endif return 0; err_out: genl_unregister_family(&nl80211_fam); return err; } void nl80211_exit(void) { genl_unregister_family(&nl80211_fam); }
gpl-2.0
tieto/gstreamer-plugins-base-pkg
gst/playback/gstplaysinkaudioconvert.c
36
6363
/* GStreamer
 * Copyright (C) <2011> Sebastian Dröge <sebastian.droege@collabora.co.uk>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstplaysinkaudioconvert.h"

#include <gst/pbutils/pbutils.h>
#include <gst/gst-i18n-plugin.h>

GST_DEBUG_CATEGORY_STATIC (gst_play_sink_audio_convert_debug);
#define GST_CAT_DEFAULT gst_play_sink_audio_convert_debug

#define parent_class gst_play_sink_audio_convert_parent_class

G_DEFINE_TYPE (GstPlaySinkAudioConvert, gst_play_sink_audio_convert,
    GST_TYPE_PLAY_SINK_CONVERT_BIN);

/* GObject property ids for this element. */
enum
{
  PROP_0,
  PROP_USE_CONVERTERS,
  PROP_USE_VOLUME,
};

/* Populate the convert bin with the audio conversion chain.
 *
 * Depending on the "use-converters" flag, an audioconvert and an
 * audioresample element are created (via the convert-bin factory helper)
 * and linked src->sink in that order.  If "use-volume" is set and a
 * volume element exists, it is appended at the end of the chain.
 * Elements that fail to be created are simply skipped; only a failed
 * pad link aborts the build.
 *
 * Must be called with cbin->conversion_elements empty (asserted).
 * Returns TRUE on success, FALSE if linking two elements failed.
 */
static gboolean
gst_play_sink_audio_convert_add_conversion_elements (GstPlaySinkAudioConvert *
    self)
{
  GstPlaySinkConvertBin *cbin = GST_PLAY_SINK_CONVERT_BIN (self);
  GstElement *el, *prev = NULL;

  g_assert (cbin->conversion_elements == NULL);

  GST_DEBUG_OBJECT (self,
      "Building audio conversion with use-converters %d, use-volume %d",
      self->use_converters, self->use_volume);

  if (self->use_converters) {
    el = gst_play_sink_convert_bin_add_conversion_element_factory (cbin,
        "audioconvert", "conv");
    if (el) {
      prev = el;
    }

    el = gst_play_sink_convert_bin_add_conversion_element_factory (cbin,
        "audioresample", "resample");
    if (el) {
      /* link after audioconvert when that one was created */
      if (prev) {
        if (!gst_element_link_pads_full (prev, "src", el, "sink",
                GST_PAD_LINK_CHECK_TEMPLATE_CAPS))
          goto link_failed;
      }
      prev = el;
    }
  }

  if (self->use_volume && self->volume) {
    /* the volume element is owned by us (see _init) and appended last */
    el = self->volume;
    gst_play_sink_convert_bin_add_conversion_element (cbin, el);
    if (prev) {
      if (!gst_element_link_pads_full (prev, "src", el, "sink",
              GST_PAD_LINK_CHECK_TEMPLATE_CAPS))
        goto link_failed;
    }
    prev = el;
  }

  return TRUE;

link_failed:
  return FALSE;
}

/* Drop our reference on the volume element and chain up. */
static void
gst_play_sink_audio_convert_finalize (GObject * object)
{
  GstPlaySinkAudioConvert *self = GST_PLAY_SINK_AUDIO_CONVERT_CAST (object);

  if (self->volume)
    gst_object_unref (self->volume);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

/* Property setter.  When "use-converters" or "use-volume" actually
 * changes value, the conversion chain inside the bin is torn down and
 * rebuilt under the convert-bin lock. */
static void
gst_play_sink_audio_convert_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstPlaySinkAudioConvert *self = GST_PLAY_SINK_AUDIO_CONVERT_CAST (object);
  gboolean v, changed = FALSE;

  GST_PLAY_SINK_CONVERT_BIN_LOCK (self);
  switch (prop_id) {
    case PROP_USE_CONVERTERS:
      v = g_value_get_boolean (value);
      if (v != self->use_converters) {
        self->use_converters = v;
        changed = TRUE;
      }
      break;
    case PROP_USE_VOLUME:
      v = g_value_get_boolean (value);
      if (v != self->use_volume) {
        self->use_volume = v;
        changed = TRUE;
      }
      break;
    default:
      break;
  }

  if (changed) {
    GstPlaySinkConvertBin *cbin = GST_PLAY_SINK_CONVERT_BIN (self);
    GST_DEBUG_OBJECT (self, "Rebuilding converter bin");
    gst_play_sink_convert_bin_remove_elements (cbin);
    gst_play_sink_audio_convert_add_conversion_elements (self);
    gst_play_sink_convert_bin_add_identity (cbin);
    gst_play_sink_convert_bin_cache_converter_caps (cbin);
  }
  GST_PLAY_SINK_CONVERT_BIN_UNLOCK (self);
}

/* Property getter, reads the flags under the convert-bin lock. */
static void
gst_play_sink_audio_convert_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstPlaySinkAudioConvert *self = GST_PLAY_SINK_AUDIO_CONVERT_CAST (object);

  GST_PLAY_SINK_CONVERT_BIN_LOCK (self);
  switch (prop_id) {
    case PROP_USE_CONVERTERS:
      g_value_set_boolean (value, self->use_converters);
      break;
    case PROP_USE_VOLUME:
      g_value_set_boolean (value, self->use_volume);
      break;
    default:
      break;
  }
  GST_PLAY_SINK_CONVERT_BIN_UNLOCK (self);
}

/* Class initialisation: install vfuncs, the two boolean properties
 * (both default to FALSE) and the element metadata. */
static void
gst_play_sink_audio_convert_class_init (GstPlaySinkAudioConvertClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  GST_DEBUG_CATEGORY_INIT (gst_play_sink_audio_convert_debug,
      "playsinkaudioconvert", 0, "play bin");

  gobject_class = (GObjectClass *) klass;
  gstelement_class = (GstElementClass *) klass;

  gobject_class->finalize = gst_play_sink_audio_convert_finalize;
  gobject_class->set_property = gst_play_sink_audio_convert_set_property;
  gobject_class->get_property = gst_play_sink_audio_convert_get_property;

  g_object_class_install_property (gobject_class, PROP_USE_CONVERTERS,
      g_param_spec_boolean ("use-converters", "Use converters",
          "Whether to use conversion elements", FALSE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_USE_VOLUME,
      g_param_spec_boolean ("use-volume", "Use volume",
          "Whether to use a volume element", FALSE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  gst_element_class_set_static_metadata (gstelement_class,
      "Player Sink Audio Converter", "Audio/Bin/Converter",
      "Convenience bin for audio conversion",
      "Sebastian Dröge <sebastian.droege@collabora.co.uk>");
}

/* Instance initialisation: mark the bin as audio, eagerly create the
 * volume element (sinking its floating ref so we own it) and build the
 * initial, empty-by-default conversion chain. */
static void
gst_play_sink_audio_convert_init (GstPlaySinkAudioConvert * self)
{
  GstPlaySinkConvertBin *cbin = GST_PLAY_SINK_CONVERT_BIN (self);
  cbin->audio = TRUE;

  /* FIXME: Only create this on demand but for now we need
   * it to always exist because of playsink's volume proxying
   * logic.
   */
  self->volume = gst_element_factory_make ("volume", "volume");
  if (self->volume)
    gst_object_ref_sink (self->volume);

  gst_play_sink_audio_convert_add_conversion_elements (self);
  gst_play_sink_convert_bin_cache_converter_caps (cbin);
}
gpl-2.0
yajnab/android_kernel_semc_msm7x30
drivers/mmc/core/core.c
36
33517
/* * linux/drivers/mmc/core/core.c * * Copyright (C) 2003-2004 Russell King, All Rights Reserved. * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved. * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/pagemap.h> #include <linux/err.h> #include <linux/leds.h> #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/regulator/consumer.h> #include <linux/wakelock.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include "core.h" #include "bus.h" #include "host.h" #include "sdio_bus.h" #include "mmc_ops.h" #include "sd_ops.h" #include "sdio_ops.h" #include <linux/pm.h> static struct workqueue_struct *workqueue; static struct wake_lock mmc_delayed_work_wake_lock; /* * Enabling software CRCs on the data blocks can be a significant (30%) * performance cost, and for other reasons may not always be desired. * So we allow it it to be disabled. */ int use_spi_crc = 1; module_param(use_spi_crc, bool, 0); /* * Internal function. Schedule delayed work in the MMC work queue. */ static int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) { wake_lock(&mmc_delayed_work_wake_lock); return queue_delayed_work(workqueue, work, delay); } /* * Internal function. Flush all scheduled work from the MMC work queue. 
*/ static void mmc_flush_scheduled_work(void) { flush_workqueue(workqueue); } /** * mmc_request_done - finish processing an MMC request * @host: MMC host which completed request * @mrq: MMC request which request * * MMC drivers should call this function when they have completed * their processing of a request. */ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) { struct mmc_command *cmd = mrq->cmd; int err = cmd->error; #ifdef CONFIG_MMC_PERF_PROFILING ktime_t diff; #endif if (err && cmd->retries && mmc_host_is_spi(host)) { if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) cmd->retries = 0; } if (err && cmd->retries) { pr_debug("%s: req failed (CMD%u): %d, retrying...\n", mmc_hostname(host), cmd->opcode, err); cmd->retries--; cmd->error = 0; host->ops->request(host, mrq); } else { led_trigger_event(host->led, LED_OFF); pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", mmc_hostname(host), cmd->opcode, err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); if (mrq->data) { #ifdef CONFIG_MMC_PERF_PROFILING diff = ktime_sub(ktime_get(), host->perf.start); if (mrq->data->flags == MMC_DATA_READ) { host->perf.rbytes_drv += mrq->data->bytes_xfered; host->perf.rtime_drv = ktime_add(host->perf.rtime_drv, diff); } else { host->perf.wbytes_drv += mrq->data->bytes_xfered; host->perf.wtime_drv = ktime_add(host->perf.wtime_drv, diff); } #endif pr_debug("%s: %d bytes transferred: %d\n", mmc_hostname(host), mrq->data->bytes_xfered, mrq->data->error); } if (mrq->stop) { pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n", mmc_hostname(host), mrq->stop->opcode, mrq->stop->error, mrq->stop->resp[0], mrq->stop->resp[1], mrq->stop->resp[2], mrq->stop->resp[3]); } if (mrq->done) mrq->done(mrq); } } EXPORT_SYMBOL(mmc_request_done); static void mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_MMC_DEBUG unsigned int i, sz; struct scatterlist *sg; #endif pr_debug("%s: starting CMD%u arg %08x flags %08x\n", mmc_hostname(host), 
mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); if (mrq->data) { pr_debug("%s: blksz %d blocks %d flags %08x " "tsac %d ms nsac %d\n", mmc_hostname(host), mrq->data->blksz, mrq->data->blocks, mrq->data->flags, mrq->data->timeout_ns / 1000000, mrq->data->timeout_clks); } if (mrq->stop) { pr_debug("%s: CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->stop->opcode, mrq->stop->arg, mrq->stop->flags); } WARN_ON(!host->claimed); led_trigger_event(host->led, LED_FULL); mrq->cmd->error = 0; mrq->cmd->mrq = mrq; if (mrq->data) { BUG_ON(mrq->data->blksz > host->max_blk_size); BUG_ON(mrq->data->blocks > host->max_blk_count); BUG_ON(mrq->data->blocks * mrq->data->blksz > host->max_req_size); #ifdef CONFIG_MMC_DEBUG sz = 0; for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) sz += sg->length; BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); #endif mrq->cmd->data = mrq->data; mrq->data->error = 0; mrq->data->mrq = mrq; if (mrq->stop) { mrq->data->stop = mrq->stop; mrq->stop->error = 0; mrq->stop->mrq = mrq; } #ifdef CONFIG_MMC_PERF_PROFILING host->perf.start = ktime_get(); #endif } host->ops->request(host, mrq); } static void mmc_wait_done(struct mmc_request *mrq) { complete(mrq->done_data); } /** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command * @mrq: MMC request to start * * Start a new MMC custom command request for a host, and wait * for the command to complete. Does not attempt to parse the * response. */ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) { DECLARE_COMPLETION_ONSTACK(complete); mrq->done_data = &complete; mrq->done = mmc_wait_done; mmc_start_request(host, mrq); wait_for_completion(&complete); } EXPORT_SYMBOL(mmc_wait_for_req); /** * mmc_wait_for_cmd - start a command and wait for completion * @host: MMC host to start command * @cmd: MMC command to start * @retries: maximum number of retries * * Start a new MMC command for a host, and wait for the command * to complete. 
Return any error that occurred while the command * was executing. Do not attempt to parse the response. */ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) { struct mmc_request mrq; WARN_ON(!host->claimed); memset(&mrq, 0, sizeof(struct mmc_request)); memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = retries; mrq.cmd = cmd; cmd->data = NULL; mmc_wait_for_req(host, &mrq); return cmd->error; } EXPORT_SYMBOL(mmc_wait_for_cmd); /** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer * * Computes the data timeout parameters according to the * correct algorithm given the card type. */ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) { unsigned int mult; /* * SDIO cards only define an upper 1 s limit on access. */ if (mmc_card_sdio(card)) { data->timeout_ns = 1000000000; data->timeout_clks = 0; return; } /* * SD cards use a 100 multiplier rather than 10 */ mult = mmc_card_sd(card) ? 100 : 10; /* * Scale up the multiplier (and therefore the timeout) by * the r2w factor for writes. */ if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; data->timeout_ns = card->csd.tacc_ns * mult; data->timeout_clks = card->csd.tacc_clks * mult; /* * SD cards also have an upper limit on the timeout. */ if (mmc_card_sd(card)) { unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; timeout_us += data->timeout_clks * 1000 / (card->host->ios.clock / 1000); if (data->flags & MMC_DATA_WRITE) /* * The limit is really 250 ms, but that is * insufficient for some crappy cards. */ limit_us = 300000; else limit_us = 100000; /* * SDHC cards always use these fixed values. */ if (timeout_us > limit_us || mmc_card_blockaddr(card)) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } } /* * Some cards need very high timeouts if driven in SPI mode. 
* The worst observed timeout was 900ms after writing a * continuous stream of data until the internal logic * overflowed. */ if (mmc_host_is_spi(card->host)) { if (data->flags & MMC_DATA_WRITE) { if (data->timeout_ns < 1000000000) data->timeout_ns = 1000000000; /* 1s */ } else { if (data->timeout_ns < 100000000) data->timeout_ns = 100000000; /* 100ms */ } } } EXPORT_SYMBOL(mmc_set_data_timeout); /** * mmc_align_data_size - pads a transfer size to a more optimal value * @card: the MMC card associated with the data transfer * @sz: original transfer size * * Pads the original data size with a number of extra bytes in * order to avoid controller bugs and/or performance hits * (e.g. some controllers revert to PIO for certain sizes). * * Returns the improved size, which might be unmodified. * * Note that this function is only relevant when issuing a * single scatter gather entry. */ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) { /* * FIXME: We don't have a system for the controller to tell * the core about its problems yet, so for now we just 32-bit * align the size. */ sz = ((sz + 3) / 4) * 4; return sz; } EXPORT_SYMBOL(mmc_align_data_size); /** * mmc_host_enable - enable a host. * @host: mmc host to enable * * Hosts that support power saving can use the 'enable' and 'disable' * methods to exit and enter power saving states. For more information * see comments for struct mmc_host_ops. 
*/ int mmc_host_enable(struct mmc_host *host) { if (!(host->caps & MMC_CAP_DISABLE)) return 0; if (host->en_dis_recurs) return 0; if (host->nesting_cnt++) return 0; cancel_delayed_work_sync(&host->disable); if (host->enabled) return 0; if (host->ops->enable) { int err; host->en_dis_recurs = 1; err = host->ops->enable(host); host->en_dis_recurs = 0; if (err) { pr_debug("%s: enable error %d\n", mmc_hostname(host), err); return err; } } host->enabled = 1; return 0; } EXPORT_SYMBOL(mmc_host_enable); static int mmc_host_do_disable(struct mmc_host *host, int lazy) { if (host->ops->disable) { int err; host->en_dis_recurs = 1; err = host->ops->disable(host, lazy); host->en_dis_recurs = 0; if (err < 0) { pr_debug("%s: disable error %d\n", mmc_hostname(host), err); return err; } if (err > 0) { unsigned long delay = msecs_to_jiffies(err); mmc_schedule_delayed_work(&host->disable, delay); } } host->enabled = 0; return 0; } /** * mmc_host_disable - disable a host. * @host: mmc host to disable * * Hosts that support power saving can use the 'enable' and 'disable' * methods to exit and enter power saving states. For more information * see comments for struct mmc_host_ops. */ int mmc_host_disable(struct mmc_host *host) { int err; if (!(host->caps & MMC_CAP_DISABLE)) return 0; if (host->en_dis_recurs) return 0; if (--host->nesting_cnt) return 0; if (!host->enabled) return 0; err = mmc_host_do_disable(host, 0); return err; } EXPORT_SYMBOL(mmc_host_disable); /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim * @abort: whether or not the operation should be aborted * * Claim a host for a set of operations. If @abort is non null and * dereference a non-zero value then this will return prematurely with * that non-zero value without acquiring the lock. Returns zero * with the lock held otherwise. 
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
#ifdef CONFIG_PM_RUNTIME
	/*
	 * If the host controller is mid runtime-suspend, poll (15 ms steps)
	 * until that transition finishes before trying to claim it.  The
	 * task that is itself performing the suspend is let through so it
	 * cannot deadlock against its own transition.
	 */
	while (mmc_dev(host)->power.runtime_status == RPM_SUSPENDING) {
		if (host->suspend_task == current)
			break;
		msleep(15);
	}
#endif
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		/*
		 * Classic wait-queue sleep loop: set the task state before
		 * testing the condition so a wake_up() arriving between the
		 * test and schedule() is not lost.
		 */
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		/* Stop waiting on abort, on a free host, or on a nested claim. */
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		/* Take the claim (claim_cnt tracks recursive claims). */
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		/* Aborted: hand the wakeup on to the next waiter. */
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	/* Successful claim also re-enables a power-saving-capable host. */
	if (!stop)
		mmc_host_enable(host);
	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
*/ int mmc_try_claim_host(struct mmc_host *host) { int claimed_host = 0; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (!host->claimed || host->claimer == current) { host->claimed = 1; host->claimer = current; host->claim_cnt += 1; claimed_host = 1; } spin_unlock_irqrestore(&host->lock, flags); return claimed_host; } EXPORT_SYMBOL(mmc_try_claim_host); static void mmc_do_release_host(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (--host->claim_cnt) { /* Release for nested claim */ spin_unlock_irqrestore(&host->lock, flags); } else { host->claimed = 0; host->claimer = NULL; spin_unlock_irqrestore(&host->lock, flags); wake_up(&host->wq); } } void mmc_host_deeper_disable(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, disable.work); /* If the host is claimed then we do not want to disable it anymore */ if (!mmc_try_claim_host(host)) goto out; mmc_host_do_disable(host, 1); mmc_do_release_host(host); out: wake_unlock(&mmc_delayed_work_wake_lock); } /** * mmc_host_lazy_disable - lazily disable a host. * @host: mmc host to disable * * Hosts that support power saving can use the 'enable' and 'disable' * methods to exit and enter power saving states. For more information * see comments for struct mmc_host_ops. */ int mmc_host_lazy_disable(struct mmc_host *host) { if (!(host->caps & MMC_CAP_DISABLE)) return 0; if (host->en_dis_recurs) return 0; if (--host->nesting_cnt) return 0; if (!host->enabled) return 0; if (host->disable_delay) { mmc_schedule_delayed_work(&host->disable, msecs_to_jiffies(host->disable_delay)); return 0; } else return mmc_host_do_disable(host, 1); } EXPORT_SYMBOL(mmc_host_lazy_disable); /** * mmc_release_host - release a host * @host: mmc host to release * * Release a MMC host, allowing others to claim the host * for their operations. 
*/ void mmc_release_host(struct mmc_host *host) { WARN_ON(!host->claimed); mmc_host_lazy_disable(host); mmc_do_release_host(host); } EXPORT_SYMBOL(mmc_release_host); /* * Internal function that does the actual ios call to the host driver, * optionally printing some debug output. */ static inline void mmc_set_ios(struct mmc_host *host) { struct mmc_ios *ios = &host->ios; pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u " "width %u timing %u\n", mmc_hostname(host), ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select, ios->vdd, ios->bus_width, ios->timing); host->ops->set_ios(host, ios); } /* * Control chip select pin on a host. */ void mmc_set_chip_select(struct mmc_host *host, int mode) { host->ios.chip_select = mode; mmc_set_ios(host); } /* * Sets the host clock to the highest possible frequency that * is below "hz". */ void mmc_set_clock(struct mmc_host *host, unsigned int hz) { WARN_ON(hz < host->f_min); if (hz > host->f_max) hz = host->f_max; host->ios.clock = hz; mmc_set_ios(host); } /* * Change the bus mode (open drain/push-pull) of a host. */ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) { host->ios.bus_mode = mode; mmc_set_ios(host); } /* * Change data bus width of a host. */ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) { host->ios.bus_width = width; mmc_set_ios(host); } /** * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number * @vdd: voltage (mV) * @low_bits: prefer low bits in boundary cases * * This function returns the OCR bit number according to the provided @vdd * value. If conversion is not possible a negative errno value returned. * * Depending on the @low_bits flag the function prefers low or high OCR bits * on boundary voltages. For example, * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33); * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34); * * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21). 
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	/* MMC_VDD_35_36 is the highest voltage window the OCR defines. */
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	/* Only 1.65 V - 3.6 V is representable in an OCR mask. */
	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	/* The whole 1.65-1.95 V span maps onto one low-voltage bit. */
	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	/*
	 * When the caller prefers low bits, nudge an exact 100 mV boundary
	 * down into the lower window, e.g. 3300 mV -> MMC_VDD_32_33
	 * instead of MMC_VDD_33_34.
	 */
	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
*/ int mmc_regulator_get_ocrmask(struct regulator *supply) { int result = 0; int count; int i; count = regulator_count_voltages(supply); if (count < 0) return count; for (i = 0; i < count; i++) { int vdd_uV; int vdd_mV; vdd_uV = regulator_list_voltage(supply, i); if (vdd_uV <= 0) continue; vdd_mV = vdd_uV / 1000; result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); } return result; } EXPORT_SYMBOL(mmc_regulator_get_ocrmask); /** * mmc_regulator_set_ocr - set regulator to match host->ios voltage * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) * @supply: regulator to use * * Returns zero on success, else negative errno. * * MMC host drivers may use this to enable or disable a regulator using * a particular supply voltage. This would normally be called from the * set_ios() method. */ int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit) { int result = 0; int min_uV, max_uV; int enabled; enabled = regulator_is_enabled(supply); if (enabled < 0) return enabled; if (vdd_bit) { int tmp; int voltage; /* REVISIT mmc_vddrange_to_ocrmask() may have set some * bits this regulator doesn't quite support ... don't * be too picky, most cards and regulators are OK with * a 0.1V range goof (it's a small error percentage). 
*/ tmp = vdd_bit - ilog2(MMC_VDD_165_195); if (tmp == 0) { min_uV = 1650 * 1000; max_uV = 1950 * 1000; } else { min_uV = 1900 * 1000 + tmp * 100 * 1000; max_uV = min_uV + 100 * 1000; } /* avoid needless changes to this voltage; the regulator * might not allow this operation */ voltage = regulator_get_voltage(supply); if (voltage < 0) result = voltage; else if (voltage < min_uV || voltage > max_uV) result = regulator_set_voltage(supply, min_uV, max_uV); else result = 0; if (result == 0 && !enabled) result = regulator_enable(supply); } else if (enabled) { result = regulator_disable(supply); } return result; } EXPORT_SYMBOL(mmc_regulator_set_ocr); #endif /* * Mask off any voltages we don't support and select * the lowest voltage */ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; ocr &= host->ocr_avail; bit = ffs(ocr); if (bit) { bit -= 1; ocr &= 3 << bit; host->ios.vdd = bit; mmc_set_ios(host); } else { pr_warning("%s: host doesn't support card's voltages\n", mmc_hostname(host)); ocr = 0; } return ocr; } /* * Select timing parameters for host. */ void mmc_set_timing(struct mmc_host *host, unsigned int timing) { host->ios.timing = timing; mmc_set_ios(host); } /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. * We then wait a bit for the power to stabilise. Finally, * enable the bus drivers and clock to the card. * * We must _NOT_ enable the clock prior to power stablising. * * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. 
*/ static void mmc_power_up(struct mmc_host *host) { int bit; /* If ocr is set, we use it */ if (host->ocr) bit = ffs(host->ocr) - 1; else bit = fls(host->ocr_avail) - 1; host->ios.vdd = bit; if (mmc_host_is_spi(host)) { host->ios.chip_select = MMC_CS_HIGH; host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; } else { host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; } host->ios.power_mode = MMC_POWER_UP; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); /* * This delay should be sufficient to allow the power supply * to reach the minimum voltage. */ mmc_delay(10); host->ios.clock = host->f_min; host->ios.power_mode = MMC_POWER_ON; mmc_set_ios(host); /* * This delay must be at least 74 clock sizes, or 1 ms, or the * time required to reach a stable voltage. */ mmc_delay(10); } static void mmc_power_off(struct mmc_host *host) { host->ios.clock = 0; host->ios.vdd = 0; if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; } host->ios.power_mode = MMC_POWER_OFF; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); } /* * Cleanup when the last reference to the bus operator is dropped. */ static void __mmc_release_bus(struct mmc_host *host) { BUG_ON(!host); BUG_ON(host->bus_refs); BUG_ON(!host->bus_dead); host->bus_ops = NULL; } /* * Increase reference count of bus operator */ static inline void mmc_bus_get(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->bus_refs++; spin_unlock_irqrestore(&host->lock, flags); } /* * Decrease reference count of bus operator and free it if * it is the last reference. 
*/ static inline void mmc_bus_put(struct mmc_host *host) { unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->bus_refs--; if ((host->bus_refs == 0) && host->bus_ops) __mmc_release_bus(host); spin_unlock_irqrestore(&host->lock, flags); } int mmc_resume_bus(struct mmc_host *host) { unsigned long flags; if (!mmc_bus_needs_resume(host)) return -EINVAL; printk("%s: Starting deferred resume\n", mmc_hostname(host)); spin_lock_irqsave(&host->lock, flags); host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME; spin_unlock_irqrestore(&host->lock, flags); mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { mmc_power_up(host); BUG_ON(!host->bus_ops->resume); host->bus_ops->resume(host); } if (host->bus_ops && host->bus_ops->detect && !host->bus_dead) host->bus_ops->detect(host); mmc_bus_put(host); printk("%s: Deferred resume completed\n", mmc_hostname(host)); return 0; } EXPORT_SYMBOL(mmc_resume_bus); /* * Assign a mmc bus handler to a host. Only one bus handler may control a * host at any given time. */ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops) { unsigned long flags; BUG_ON(!host); BUG_ON(!ops); WARN_ON(!host->claimed); spin_lock_irqsave(&host->lock, flags); BUG_ON(host->bus_ops); BUG_ON(host->bus_refs); host->bus_ops = ops; host->bus_refs = 1; host->bus_dead = 0; spin_unlock_irqrestore(&host->lock, flags); } /* * Remove the current bus handler from a host. Assumes that there are * no interesting cards left, so the bus is powered down. */ void mmc_detach_bus(struct mmc_host *host) { unsigned long flags; BUG_ON(!host); WARN_ON(!host->claimed); WARN_ON(!host->bus_ops); spin_lock_irqsave(&host->lock, flags); host->bus_dead = 1; spin_unlock_irqrestore(&host->lock, flags); mmc_power_off(host); mmc_bus_put(host); } /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. 
* @delay: optional delay to wait before detection (jiffies) * * MMC drivers should call this when they detect a card has been * inserted or removed. The MMC layer will confirm that any * present card is still functional, and initialize any newly * inserted. */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { #ifdef CONFIG_MMC_DEBUG unsigned long flags; spin_lock_irqsave(&host->lock, flags); WARN_ON(host->removed); spin_unlock_irqrestore(&host->lock, flags); #endif mmc_schedule_delayed_work(&host->detect, delay); } EXPORT_SYMBOL(mmc_detect_change); void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); u32 ocr; int err; int extend_wakelock = 0; mmc_bus_get(host); /* if there is a card registered, check whether it is still present */ if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) { host->bus_ops->detect(host); /* If the card was removed the bus will be marked * as dead - extend the wakelock so userspace * can respond */ if (host->bus_dead) extend_wakelock = 1; } mmc_bus_put(host); mmc_bus_get(host); /* if there still is a card present, stop here */ if (host->bus_ops != NULL) { mmc_bus_put(host); goto out; } /* detect a newly inserted card */ /* * Only we can add a new handler, so it's safe to * release the lock here. */ mmc_bus_put(host); if (host->ops->get_cd && host->ops->get_cd(host) == 0) goto out; mmc_claim_host(host); mmc_power_up(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* * First we search for SDIO... */ err = mmc_send_io_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sdio(host, ocr)) mmc_power_off(host); extend_wakelock = 1; goto out; } /* * ...then normal SD... */ err = mmc_send_app_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); extend_wakelock = 1; goto out; } /* * ...and finally MMC. 
*/ err = mmc_send_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); extend_wakelock = 1; goto out; } mmc_release_host(host); mmc_power_off(host); out: if (extend_wakelock) wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); else wake_unlock(&mmc_delayed_work_wake_lock); if (host->caps & MMC_CAP_NEEDS_POLL) mmc_schedule_delayed_work(&host->detect, HZ); } void mmc_start_host(struct mmc_host *host) { mmc_power_off(host); mmc_detect_change(host, 0); } void mmc_stop_host(struct mmc_host *host) { #ifdef CONFIG_MMC_DEBUG unsigned long flags; spin_lock_irqsave(&host->lock, flags); host->removed = 1; spin_unlock_irqrestore(&host->lock, flags); #endif if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); cancel_delayed_work(&host->detect); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ host->pm_flags = 0; mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { if (host->bus_ops->remove) host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_release_host(host); mmc_bus_put(host); return; } mmc_bus_put(host); BUG_ON(host->card); mmc_power_off(host); } int mmc_power_save_host(struct mmc_host *host) { int ret = 0; mmc_bus_get(host); if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { mmc_bus_put(host); return -EINVAL; } if (host->bus_ops->power_save) ret = host->bus_ops->power_save(host); mmc_bus_put(host); mmc_power_off(host); return ret; } EXPORT_SYMBOL(mmc_power_save_host); int mmc_power_restore_host(struct mmc_host *host) { int ret; mmc_bus_get(host); if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { mmc_bus_put(host); return -EINVAL; } mmc_power_up(host); ret = host->bus_ops->power_restore(host); mmc_bus_put(host); return ret; } EXPORT_SYMBOL(mmc_power_restore_host); int mmc_card_awake(struct mmc_host *host) { int err = -ENOSYS; mmc_bus_get(host); if (host->bus_ops && !host->bus_dead && 
host->bus_ops->awake) err = host->bus_ops->awake(host); mmc_bus_put(host); return err; } EXPORT_SYMBOL(mmc_card_awake); int mmc_card_sleep(struct mmc_host *host) { int err = -ENOSYS; mmc_bus_get(host); if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) err = host->bus_ops->sleep(host); mmc_bus_put(host); return err; } EXPORT_SYMBOL(mmc_card_sleep); int mmc_card_can_sleep(struct mmc_host *host) { struct mmc_card *card = host->card; if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) return 1; return 0; } EXPORT_SYMBOL(mmc_card_can_sleep); #ifdef CONFIG_PM /** * mmc_suspend_host - suspend a host * @host: mmc host * @state: suspend mode (PM_SUSPEND_xxx) */ int mmc_suspend_host(struct mmc_host *host, pm_message_t state) { int err = 0; if (mmc_bus_needs_resume(host)) return 0; if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { if (host->bus_ops->suspend) err = host->bus_ops->suspend(host); } mmc_bus_put(host); if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) mmc_power_off(host); return err; } EXPORT_SYMBOL(mmc_suspend_host); /** * mmc_resume_host - resume a previously suspended host * @host: mmc host */ int mmc_resume_host(struct mmc_host *host) { int err = 0; mmc_bus_get(host); if (host->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME) { host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME; mmc_bus_put(host); return 0; } if (host->bus_ops && !host->bus_dead) { if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { mmc_power_up(host); mmc_select_voltage(host, host->ocr); } BUG_ON(!host->bus_ops->resume); err = host->bus_ops->resume(host); if (err) { printk(KERN_WARNING "%s: error %d during resume " "(card was removed?)\n", mmc_hostname(host), err); err = 0; } } mmc_bus_put(host); /* * We add a slight delay here so that resume can progress * in parallel. 
*/ mmc_detect_change(host, 1); return err; } EXPORT_SYMBOL(mmc_resume_host); /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able * to sync the card. */ int mmc_pm_notify(struct notifier_block *notify_block, unsigned long mode, void *unused) { struct mmc_host *host = container_of( notify_block, struct mmc_host, pm_notify); switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: if (!host->bus_ops || host->bus_ops->suspend) break; if (host->bus_ops->remove) host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_release_host(host); host->pm_flags = 0; break; } return 0; } #endif #ifdef CONFIG_MMC_EMBEDDED_SDIO void mmc_set_embedded_sdio_data(struct mmc_host *host, struct sdio_cis *cis, struct sdio_cccr *cccr, struct sdio_embedded_func *funcs, int num_funcs) { host->embedded_sdio_data.cis = cis; host->embedded_sdio_data.cccr = cccr; host->embedded_sdio_data.funcs = funcs; host->embedded_sdio_data.num_funcs = num_funcs; } EXPORT_SYMBOL(mmc_set_embedded_sdio_data); #endif static int __init mmc_init(void) { int ret; wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, "mmc_delayed_work"); workqueue = create_freezeable_workqueue("kmmcd"); if (!workqueue) return -ENOMEM; ret = mmc_register_bus(); if (ret) goto destroy_workqueue; ret = mmc_register_host_class(); if (ret) goto unregister_bus; ret = sdio_register_bus(); if (ret) goto unregister_host_class; return 0; unregister_host_class: mmc_unregister_host_class(); unregister_bus: mmc_unregister_bus(); destroy_workqueue: destroy_workqueue(workqueue); return ret; } static void __exit mmc_exit(void) { sdio_unregister_bus(); mmc_unregister_host_class(); mmc_unregister_bus(); destroy_workqueue(workqueue); wake_lock_destroy(&mmc_delayed_work_wake_lock); } subsys_initcall(mmc_init); module_exit(mmc_exit); MODULE_LICENSE("GPL");
gpl-2.0
sloanyang/raspberrry2v8
deps/libcxx/test/utilities/time/time.traits/time.traits.duration_values/max.pass.cpp
36
1175
//===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // <chrono> // duration_values::max #include <chrono> #include <limits> #include <cassert> #include "../../rep.h" int main() { assert(std::chrono::duration_values<int>::max() == std::numeric_limits<int>::max()); assert(std::chrono::duration_values<double>::max() == std::numeric_limits<double>::max()); assert(std::chrono::duration_values<Rep>::max() == std::numeric_limits<Rep>::max()); #ifndef _LIBCPP_HAS_NO_CONSTEXPR static_assert(std::chrono::duration_values<int>::max() == std::numeric_limits<int>::max(), ""); static_assert(std::chrono::duration_values<double>::max() == std::numeric_limits<double>::max(), ""); static_assert(std::chrono::duration_values<Rep>::max() == std::numeric_limits<Rep>::max(), ""); #endif }
gpl-2.0
schqiushui/kernel_kk444_sense_a31
fs/notify/fanotify/fanotify_user.c
292
21759
#include <linux/fanotify.h> #include <linux/fcntl.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/poll.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/compat.h> #include <asm/ioctls.h> #include "../../mount.h" #include "../fdinfo.h" #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 #define FANOTIFY_DEFAULT_MAX_MARKS 8192 #define FANOTIFY_DEFAULT_MAX_LISTENERS 128 extern const struct fsnotify_ops fanotify_fsnotify_ops; static struct kmem_cache *fanotify_mark_cache __read_mostly; static struct kmem_cache *fanotify_response_event_cache __read_mostly; struct fanotify_response_event { struct list_head list; __s32 fd; struct fsnotify_event *event; }; /* * Get an fsnotify notification event if one exists and is small * enough to fit in "count". Return an error pointer if the count * is not large enough. * * Called with the group->notification_mutex held. 
*/ static struct fsnotify_event *get_one_event(struct fsnotify_group *group, size_t count) { BUG_ON(!mutex_is_locked(&group->notification_mutex)); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); if (fsnotify_notify_queue_is_empty(group)) return NULL; if (FAN_EVENT_METADATA_LEN > count) return ERR_PTR(-EINVAL); /* held the notification_mutex the whole time, so this is the * same event we peeked above */ return fsnotify_remove_notify_event(group); } static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event, struct file **file) { int client_fd; struct file *new_file; pr_debug("%s: group=%p event=%p\n", __func__, group, event); client_fd = get_unused_fd(); if (client_fd < 0) return client_fd; if (event->data_type != FSNOTIFY_EVENT_PATH) { WARN_ON(1); put_unused_fd(client_fd); return -EINVAL; } /* * we need a new file handle for the userspace program so it can read even if it was * originally opened O_WRONLY. */ /* it's possible this event was an overflow event. in that case dentry and mnt * are NULL; That's fine, just don't call dentry open */ if (event->path.dentry && event->path.mnt) new_file = dentry_open(&event->path, group->fanotify_data.f_flags | FMODE_NONOTIFY, current_cred()); else new_file = ERR_PTR(-EOVERFLOW); if (IS_ERR(new_file)) { /* * we still send an event even if we can't open the file. this * can happen when say tasks are gone and we try to open their * /proc files or we try to open a WRONLY file like in sysfs * we just send the errno to userspace since there isn't much * else we can do. 
*/ put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); } else { *file = new_file; } return client_fd; } static int fill_event_metadata(struct fsnotify_group *group, struct fanotify_event_metadata *metadata, struct fsnotify_event *event, struct file **file) { int ret = 0; pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, group, metadata, event); *file = NULL; metadata->event_len = FAN_EVENT_METADATA_LEN; metadata->metadata_len = FAN_EVENT_METADATA_LEN; metadata->vers = FANOTIFY_METADATA_VERSION; metadata->reserved = 0; metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; metadata->pid = pid_vnr(event->tgid); if (unlikely(event->mask & FAN_Q_OVERFLOW)) metadata->fd = FAN_NOFD; else { metadata->fd = create_fd(group, event, file); if (metadata->fd < 0) ret = metadata->fd; } return ret; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, __s32 fd) { struct fanotify_response_event *re, *return_re = NULL; mutex_lock(&group->fanotify_data.access_mutex); list_for_each_entry(re, &group->fanotify_data.access_list, list) { if (re->fd != fd) continue; list_del_init(&re->list); return_re = re; break; } mutex_unlock(&group->fanotify_data.access_mutex); pr_debug("%s: found return_re=%p\n", __func__, return_re); return return_re; } static int process_access_response(struct fsnotify_group *group, struct fanotify_response *response_struct) { struct fanotify_response_event *re; __s32 fd = response_struct->fd; __u32 response = response_struct->response; pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, fd, response); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the * timeout */ switch (response) { case FAN_ALLOW: case FAN_DENY: break; default: return -EINVAL; } if (fd < 0) return -EINVAL; re = dequeue_re(group, fd); if (!re) return -ENOENT; re->event->response = response; 
wake_up(&group->fanotify_data.access_waitq); kmem_cache_free(fanotify_response_event_cache, re); return 0; } static int prepare_for_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { struct fanotify_response_event *re; if (!(event->mask & FAN_ALL_PERM_EVENTS)) return 0; re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); if (!re) return -ENOMEM; re->event = event; re->fd = fd; mutex_lock(&group->fanotify_data.access_mutex); if (atomic_read(&group->fanotify_data.bypass_perm)) { mutex_unlock(&group->fanotify_data.access_mutex); kmem_cache_free(fanotify_response_event_cache, re); event->response = FAN_ALLOW; return 0; } list_add_tail(&re->list, &group->fanotify_data.access_list); mutex_unlock(&group->fanotify_data.access_mutex); return 0; } #else static int prepare_for_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { return 0; } #endif static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fsnotify_event *event, char __user *buf) { struct fanotify_event_metadata fanotify_event_metadata; struct file *f; int fd, ret; pr_debug("%s: group=%p event=%p\n", __func__, group, event); ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); if (ret < 0) goto out; fd = fanotify_event_metadata.fd; ret = -EFAULT; if (copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len)) goto out_close_fd; ret = prepare_for_access_response(group, event, fd); if (ret) goto out_close_fd; if (fd != FAN_NOFD) fd_install(fd, f); return fanotify_event_metadata.event_len; out_close_fd: if (fd != FAN_NOFD) { put_unused_fd(fd); fput(f); } out: #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS if (event->mask & FAN_ALL_PERM_EVENTS) { event->response = FAN_DENY; wake_up(&group->fanotify_data.access_waitq); } #endif return ret; } /* intofiy userspace file descriptor functions */ static unsigned int fanotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group 
*group = file->private_data; int ret = 0; poll_wait(file, &group->notification_waitq, wait); mutex_lock(&group->notification_mutex); if (!fsnotify_notify_queue_is_empty(group)) ret = POLLIN | POLLRDNORM; mutex_unlock(&group->notification_mutex); return ret; } static ssize_t fanotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fsnotify_event *kevent; char __user *start; int ret; DEFINE_WAIT(wait); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); while (1) { prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); mutex_lock(&group->notification_mutex); kevent = get_one_event(group, count); mutex_unlock(&group->notification_mutex); if (kevent) { ret = PTR_ERR(kevent); if (IS_ERR(kevent)) break; ret = copy_event_to_user(group, kevent, buf); fsnotify_put_event(kevent); if (ret < 0) break; buf += ret; count -= ret; continue; } ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; schedule(); } finish_wait(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_response response = { .fd = -1, .response = -1 }; struct fsnotify_group *group; int ret; group = file->private_data; if (count > sizeof(response)) count = sizeof(response); pr_debug("%s: group=%p count=%zu\n", __func__, group, count); if (copy_from_user(&response, buf, count)) return -EFAULT; ret = process_access_response(group, &response); if (ret < 0) count = ret; return count; #else return -EINVAL; #endif } static int fanotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_response_event *re, *lre; 
mutex_lock(&group->fanotify_data.access_mutex); atomic_inc(&group->fanotify_data.bypass_perm); list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, re, re->event); list_del_init(&re->list); re->event->response = FAN_ALLOW; kmem_cache_free(fanotify_response_event_cache, re); } mutex_unlock(&group->fanotify_data.access_mutex); wake_up(&group->fanotify_data.access_waitq); #endif if (file->f_flags & FASYNC) fsnotify_fasync(-1, file, 0); /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); return 0; } static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event_holder *holder; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; switch (cmd) { case FIONREAD: mutex_lock(&group->notification_mutex); list_for_each_entry(holder, &group->notification_list, event_list) send_len += FAN_EVENT_METADATA_LEN; mutex_unlock(&group->notification_mutex); ret = put_user(send_len, (int __user *) p); break; } return ret; } static const struct file_operations fanotify_fops = { .show_fdinfo = fanotify_show_fdinfo, .poll = fanotify_poll, .read = fanotify_read, .write = fanotify_write, .fasync = NULL, .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = fanotify_ioctl, .llseek = noop_llseek, }; static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) { kmem_cache_free(fanotify_mark_cache, fsn_mark); } static int fanotify_find_path(int dfd, const char __user *filename, struct path *path, unsigned int flags) { int ret; pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, dfd, filename, flags); if (filename == NULL) { struct fd f = fdget(dfd); ret = -EBADF; if (!f.file) goto out; ret = -ENOTDIR; if ((flags & FAN_MARK_ONLYDIR) && !(S_ISDIR(file_inode(f.file)->i_mode))) { fdput(f); goto out; } *path = 
f.file->f_path; path_get(path); fdput(f); } else { unsigned int lookup_flags = 0; if (!(flags & FAN_MARK_DONT_FOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & FAN_MARK_ONLYDIR) lookup_flags |= LOOKUP_DIRECTORY; ret = user_path_at(dfd, filename, lookup_flags, path); if (ret) goto out; } /* you can only watch an inode if you have read permissions on it */ ret = inode_permission(path->dentry->d_inode, MAY_READ); if (ret) path_put(path); out: return ret; } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags, int *destroy) { __u32 oldmask; spin_lock(&fsn_mark->lock); if (!(flags & FAN_MARK_IGNORED_MASK)) { oldmask = fsn_mark->mask; fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask)); } else { oldmask = fsn_mark->ignored_mask; fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask)); } spin_unlock(&fsn_mark->lock); *destroy = !(oldmask & ~mask); return mask & oldmask; } static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; int destroy_mark; fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); if (!fsn_mark) return -ENOENT; removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, &destroy_mark); if (destroy_mark) fsnotify_destroy_mark(fsn_mark, group); fsnotify_put_mark(fsn_mark); if (removed & real_mount(mnt)->mnt_fsnotify_mask) fsnotify_recalc_vfsmount_mask(mnt); return 0; } static int fanotify_remove_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; int destroy_mark; fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) return -ENOENT; removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, &destroy_mark); if (destroy_mark) fsnotify_destroy_mark(fsn_mark, group); /* matches the fsnotify_find_inode_mark() */ fsnotify_put_mark(fsn_mark); if (removed 
& inode->i_fsnotify_mask) fsnotify_recalc_inode_mask(inode); return 0; } static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags) { __u32 oldmask = -1; spin_lock(&fsn_mark->lock); if (!(flags & FAN_MARK_IGNORED_MASK)) { oldmask = fsn_mark->mask; fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask)); } else { __u32 tmask = fsn_mark->ignored_mask | mask; fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); if (flags & FAN_MARK_IGNORED_SURV_MODIFY) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; } if (!(flags & FAN_MARK_ONDIR)) { __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR; fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); } spin_unlock(&fsn_mark->lock); return mask & ~oldmask; } static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark; __u32 added; int ret = 0; fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); if (!fsn_mark) { if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) return -ENOSPC; fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fsn_mark) return -ENOMEM; fsnotify_init_mark(fsn_mark, fanotify_free_mark); ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); if (ret) goto err; } added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); if (added & ~real_mount(mnt)->mnt_fsnotify_mask) fsnotify_recalc_vfsmount_mask(mnt); err: fsnotify_put_mark(fsn_mark); return ret; } static int fanotify_add_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark; __u32 added; int ret = 0; pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); /* * If some other task has this inode open for write we should not add * an ignored mark, unless that ignored mark is supposed to survive * modification changes anyway. 
*/ if ((flags & FAN_MARK_IGNORED_MASK) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY) && (atomic_read(&inode->i_writecount) > 0)) return 0; fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) { if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) return -ENOSPC; fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fsn_mark) return -ENOMEM; fsnotify_init_mark(fsn_mark, fanotify_free_mark); ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); if (ret) goto err; } added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); if (added & ~inode->i_fsnotify_mask) fsnotify_recalc_inode_mask(inode); err: fsnotify_put_mark(fsn_mark); return ret; } /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; struct user_struct *user; pr_debug("%s: flags=%d event_f_flags=%d\n", __func__, flags, event_f_flags); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (flags & ~FAN_ALL_INIT_FLAGS) return -EINVAL; user = get_current_user(); if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) { free_uid(user); return -EMFILE; } f_flags = O_RDWR | FMODE_NONOTIFY; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) f_flags |= O_NONBLOCK; /* fsnotify_alloc_group takes a ref. 
Dropped in fanotify_release */ group = fsnotify_alloc_group(&fanotify_fsnotify_ops); if (IS_ERR(group)) { free_uid(user); return PTR_ERR(group); } group->fanotify_data.user = user; atomic_inc(&user->fanotify_listeners); group->fanotify_data.f_flags = event_f_flags; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS mutex_init(&group->fanotify_data.access_mutex); init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); atomic_set(&group->fanotify_data.bypass_perm, 0); #endif switch (flags & FAN_ALL_CLASS_BITS) { case FAN_CLASS_NOTIF: group->priority = FS_PRIO_0; break; case FAN_CLASS_CONTENT: group->priority = FS_PRIO_1; break; case FAN_CLASS_PRE_CONTENT: group->priority = FS_PRIO_2; break; default: fd = -EINVAL; goto out_destroy_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; group->max_events = UINT_MAX; } else { group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; } if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; group->fanotify_data.max_marks = UINT_MAX; } else { group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; } fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); if (fd < 0) goto out_destroy_group; return fd; out_destroy_group: fsnotify_destroy_group(group); return fd; } SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, __u64, mask, int, dfd, const char __user *, pathname) { struct inode *inode = NULL; struct vfsmount *mnt = NULL; struct fsnotify_group *group; struct fd f; struct path path; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", __func__, fanotify_fd, flags, dfd, pathname, mask); /* we only use the lower 32 bits as of right now. 
*/ if (mask & ((__u64)0xffffffff << 32)) return -EINVAL; if (flags & ~FAN_ALL_MARK_FLAGS) return -EINVAL; switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { case FAN_MARK_ADD: /* fallthrough */ case FAN_MARK_REMOVE: if (!mask) return -EINVAL; case FAN_MARK_FLUSH: break; default: return -EINVAL; } if (mask & FAN_ONDIR) { flags |= FAN_MARK_ONDIR; mask &= ~FAN_ONDIR; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD)) #else if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD)) #endif return -EINVAL; f = fdget(fanotify_fd); if (unlikely(!f.file)) return -EBADF; /* verify that this is indeed an fanotify instance */ ret = -EINVAL; if (unlikely(f.file->f_op != &fanotify_fops)) goto fput_and_out; group = f.file->private_data; /* * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not * allowed to set permissions events. */ ret = -EINVAL; if (mask & FAN_ALL_PERM_EVENTS && group->priority == FS_PRIO_0) goto fput_and_out; ret = fanotify_find_path(dfd, pathname, &path, flags); if (ret) goto fput_and_out; /* inode held in place by reference to path; group by fget on fd */ if (!(flags & FAN_MARK_MOUNT)) inode = path.dentry->d_inode; else mnt = path.mnt; /* create/update an inode mark */ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { case FAN_MARK_ADD: if (flags & FAN_MARK_MOUNT) ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); else ret = fanotify_add_inode_mark(group, inode, mask, flags); break; case FAN_MARK_REMOVE: if (flags & FAN_MARK_MOUNT) ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags); else ret = fanotify_remove_inode_mark(group, inode, mask, flags); break; case FAN_MARK_FLUSH: if (flags & FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); else fsnotify_clear_inode_marks_by_group(group); break; default: ret = -EINVAL; } path_put(&path); fput_and_out: fdput(f); return ret; } #ifdef CONFIG_COMPAT 
COMPAT_SYSCALL_DEFINE6(fanotify_mark, int, fanotify_fd, unsigned int, flags, __u32, mask0, __u32, mask1, int, dfd, const char __user *, pathname) { return sys_fanotify_mark(fanotify_fd, flags, #ifdef __BIG_ENDIAN ((__u64)mask1 << 32) | mask0, #else ((__u64)mask0 << 32) | mask1, #endif dfd, pathname); } #endif /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init fanotify_user_setup(void) { fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event, SLAB_PANIC); return 0; } device_initcall(fanotify_user_setup);
gpl-2.0
T-Macgnolia/android_kernel_lge_g4stylus-stock
arch/score/mm/fault.c
548
6096
/* * arch/score/mm/fault.c * * Score Processor version. * * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Lennox Wu <lennox.wu@sunplusct.com> * Chen Liqin <liqin.chen@sunplusct.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. */ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { struct vm_area_struct *vma = NULL; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; const int field = sizeof(unsigned long) * 2; unsigned long flags = 0; siginfo_t info; int fault; info.si_code = SEGV_MAPERR; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
*/ if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) goto vmalloc_fault; #ifdef MODULE_START if (unlikely(address >= MODULE_START && address < MODULE_END)) goto vmalloc_fault; #endif /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (in_atomic() || !mm) goto bad_area_nosemaphore; if (user_mode(regs)) flags |= FAULT_FLAG_USER; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: info.si_code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } else { if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) goto bad_area; } survive: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { tsk->thread.cp0_badvaddr = address; tsk->thread.error_code = write; info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ info.si_addr = (void __user *) address; force_sig_info(SIGSEGV, &info, tsk); return; } no_context: /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) { current->thread.cp0_baduaddr = address; return; } /* * Oops. 
The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ bust_spinlocks(1); printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at " "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n", 0, field, address, field, regs->cp0_epc, field, regs->regs[3]); die("Oops", regs); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (is_global_init(tsk)) { yield(); down_read(&mm->mmap_sem); goto survive; } printk("VM: killing process %s\n", tsk->comm); if (user_mode(regs)) do_group_exit(SIGKILL); goto no_context; do_sigbus: up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) goto no_context; else /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ tsk->thread.cp0_badvaddr = address; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void __user *) address; force_sig_info(SIGBUS, &info, tsk); return; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "tsk" here. We might be inside * an interrupt in the middle of a task switch.. */ int offset = __pgd_offset(address); pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; pgd = (pgd_t *) pgd_current + offset; pgd_k = init_mm.pgd + offset; if (!pgd_present(*pgd_k)) goto no_context; set_pgd(pgd, *pgd_k); pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) goto no_context; pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) goto no_context; set_pmd(pmd, *pmd_k); pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) goto no_context; return; } }
gpl-2.0
triplekill/linux
drivers/input/misc/soc_button_array.c
804
5198
/* * Supports for the button array on SoC tablets originally running * Windows 8. * * (C) Copyright 2014 Intel Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/module.h> #include <linux/input.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/gpio/consumer.h> #include <linux/gpio_keys.h> #include <linux/platform_device.h> /* * Definition of buttons on the tablet. The ACPI index of each button * is defined in section 2.8.7.2 of "Windows ACPI Design Guide for SoC * Platforms" */ #define MAX_NBUTTONS 5 struct soc_button_info { const char *name; int acpi_index; unsigned int event_type; unsigned int event_code; bool autorepeat; bool wakeup; }; /* * Some of the buttons like volume up/down are auto repeat, while others * are not. To support both, we register two platform devices, and put * buttons into them based on whether the key should be auto repeat. */ #define BUTTON_TYPES 2 struct soc_button_data { struct platform_device *children[BUTTON_TYPES]; }; /* * Get the Nth GPIO number from the ACPI object. 
*/ static int soc_button_lookup_gpio(struct device *dev, int acpi_index) { struct gpio_desc *desc; int gpio; desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index, GPIOD_ASIS); if (IS_ERR(desc)) return PTR_ERR(desc); gpio = desc_to_gpio(desc); gpiod_put(desc); return gpio; } static struct platform_device * soc_button_device_create(struct platform_device *pdev, const struct soc_button_info *button_info, bool autorepeat) { const struct soc_button_info *info; struct platform_device *pd; struct gpio_keys_button *gpio_keys; struct gpio_keys_platform_data *gpio_keys_pdata; int n_buttons = 0; int gpio; int error; gpio_keys_pdata = devm_kzalloc(&pdev->dev, sizeof(*gpio_keys_pdata) + sizeof(*gpio_keys) * MAX_NBUTTONS, GFP_KERNEL); if (!gpio_keys_pdata) return ERR_PTR(-ENOMEM); gpio_keys = (void *)(gpio_keys_pdata + 1); for (info = button_info; info->name; info++) { if (info->autorepeat != autorepeat) continue; gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index); if (gpio < 0) continue; gpio_keys[n_buttons].type = info->event_type; gpio_keys[n_buttons].code = info->event_code; gpio_keys[n_buttons].gpio = gpio; gpio_keys[n_buttons].active_low = 1; gpio_keys[n_buttons].desc = info->name; gpio_keys[n_buttons].wakeup = info->wakeup; n_buttons++; } if (n_buttons == 0) { error = -ENODEV; goto err_free_mem; } gpio_keys_pdata->buttons = gpio_keys; gpio_keys_pdata->nbuttons = n_buttons; gpio_keys_pdata->rep = autorepeat; pd = platform_device_alloc("gpio-keys", PLATFORM_DEVID_AUTO); if (!pd) { error = -ENOMEM; goto err_free_mem; } error = platform_device_add_data(pd, gpio_keys_pdata, sizeof(*gpio_keys_pdata)); if (error) goto err_free_pdev; error = platform_device_add(pd); if (error) goto err_free_pdev; return pd; err_free_pdev: platform_device_put(pd); err_free_mem: devm_kfree(&pdev->dev, gpio_keys_pdata); return ERR_PTR(error); } static int soc_button_remove(struct platform_device *pdev) { struct soc_button_data *priv = platform_get_drvdata(pdev); int i; for (i = 0; i < 
BUTTON_TYPES; i++) if (priv->children[i]) platform_device_unregister(priv->children[i]); return 0; } static int soc_button_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct acpi_device_id *id; struct soc_button_info *button_info; struct soc_button_data *priv; struct platform_device *pd; int i; int error; id = acpi_match_device(dev->driver->acpi_match_table, dev); if (!id) return -ENODEV; button_info = (struct soc_button_info *)id->driver_data; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); for (i = 0; i < BUTTON_TYPES; i++) { pd = soc_button_device_create(pdev, button_info, i == 0); if (IS_ERR(pd)) { error = PTR_ERR(pd); if (error != -ENODEV) { soc_button_remove(pdev); return error; } continue; } priv->children[i] = pd; } if (!priv->children[0] && !priv->children[1]) return -ENODEV; return 0; } static struct soc_button_info soc_button_PNP0C40[] = { { "power", 0, EV_KEY, KEY_POWER, false, true }, { "home", 1, EV_KEY, KEY_LEFTMETA, false, true }, { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false }, { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false }, { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false }, { } }; static const struct acpi_device_id soc_button_acpi_match[] = { { "PNP0C40", (unsigned long)soc_button_PNP0C40 }, { } }; MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match); static struct platform_driver soc_button_driver = { .probe = soc_button_probe, .remove = soc_button_remove, .driver = { .name = KBUILD_MODNAME, .acpi_match_table = ACPI_PTR(soc_button_acpi_match), }, }; module_platform_driver(soc_button_driver); MODULE_LICENSE("GPL");
gpl-2.0
mrabe89sigma/linux-curie
arch/sh/kernel/smp.c
804
9748
/* * arch/sh/kernel/smp.c * * SMP support for the SuperH processors. * * Copyright (C) 2002 - 2010 Paul Mundt * Copyright (C) 2006 - 2007 Akio Idehara * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/err.h> #include <linux/cache.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/atomic.h> #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/smp.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/setup.h> int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ struct plat_smp_ops *mp_ops = NULL; /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; void register_smp_ops(struct plat_smp_ops *ops) { if (mp_ops) printk(KERN_WARNING "Overriding previously set SMP ops\n"); mp_ops = ops; } static inline void smp_store_cpu_info(unsigned int cpu) { struct sh_cpuinfo *c = cpu_data + cpu; memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); c->loops_per_jiffy = loops_per_jiffy; } void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu = smp_processor_id(); init_new_context(current, &init_mm); current_thread_info()->cpu = cpu; mp_ops->prepare_cpus(max_cpus); #ifndef CONFIG_HOTPLUG_CPU init_cpu_present(cpu_possible_mask); #endif } void __init smp_prepare_boot_cpu(void) { unsigned int cpu = smp_processor_id(); __cpu_number_map[0] = cpu; __cpu_logical_map[0] = cpu; set_cpu_online(cpu, true); set_cpu_possible(cpu, true); per_cpu(cpu_state, cpu) = CPU_ONLINE; } #ifdef CONFIG_HOTPLUG_CPU void native_cpu_die(unsigned int cpu) { unsigned int i; for (i = 0; i < 10; i++) { smp_rmb(); if (per_cpu(cpu_state, cpu) == CPU_DEAD) { if 
(system_state == SYSTEM_RUNNING) pr_info("CPU %u is now offline\n", cpu); return; } msleep(100); } pr_err("CPU %u didn't die...\n", cpu); } int native_cpu_disable(unsigned int cpu) { return cpu == 0 ? -EPERM : 0; } void play_dead_common(void) { idle_task_exit(); irq_ctx_exit(raw_smp_processor_id()); mb(); __this_cpu_write(cpu_state, CPU_DEAD); local_irq_disable(); } void native_play_dead(void) { play_dead_common(); } int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); int ret; ret = mp_ops->cpu_disable(cpu); if (ret) return ret; /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); /* * OK - migrate IRQs away from this CPU */ migrate_irqs(); /* * Stop the local timer for this CPU. */ local_timer_stop(cpu); /* * Flush user cache and TLB mappings, and then remove this CPU * from the vm mask set of all processes. */ flush_cache_all(); local_flush_tlb_all(); clear_tasks_mm_cpumask(cpu); return 0; } #else /* ... 
!CONFIG_HOTPLUG_CPU */ int native_cpu_disable(unsigned int cpu) { return -ENOSYS; } void native_cpu_die(unsigned int cpu) { /* We said "no" in __cpu_disable */ BUG(); } void native_play_dead(void) { BUG(); } #endif asmlinkage void start_secondary(void) { unsigned int cpu = smp_processor_id(); struct mm_struct *mm = &init_mm; enable_mmu(); atomic_inc(&mm->mm_count); atomic_inc(&mm->mm_users); current->active_mm = mm; enter_lazy_tlb(mm, current); local_flush_tlb_all(); per_cpu_trap_init(); preempt_disable(); notify_cpu_starting(cpu); local_irq_enable(); /* Enable local timers */ local_timer_setup(cpu); calibrate_delay(); smp_store_cpu_info(cpu); set_cpu_online(cpu, true); per_cpu(cpu_state, cpu) = CPU_ONLINE; cpu_startup_entry(CPUHP_ONLINE); } extern struct { unsigned long sp; unsigned long bss_start; unsigned long bss_end; void *start_kernel_fn; void *cpu_init_fn; void *thread_info; } stack_start; int __cpu_up(unsigned int cpu, struct task_struct *tsk) { unsigned long timeout; per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; /* Fill in data in head.S for secondary cpus */ stack_start.sp = tsk->thread.sp; stack_start.thread_info = tsk->stack; stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ stack_start.start_kernel_fn = start_secondary; flush_icache_range((unsigned long)&stack_start, (unsigned long)&stack_start + sizeof(stack_start)); wmb(); mp_ops->start_cpu(cpu, (unsigned long)_stext); timeout = jiffies + HZ; while (time_before(jiffies, timeout)) { if (cpu_online(cpu)) break; udelay(10); barrier(); } if (cpu_online(cpu)) return 0; return -ENOENT; } void __init smp_cpus_done(unsigned int max_cpus) { unsigned long bogosum = 0; int cpu; for_each_online_cpu(cpu) bogosum += cpu_data[cpu].loops_per_jiffy; printk(KERN_INFO "SMP: Total of %d processors activated " "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), bogosum / (500000/HZ), (bogosum / (5000/HZ)) % 100); } void smp_send_reschedule(int cpu) { mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); } void 
smp_send_stop(void) { smp_call_function(stop_this_cpu, 0, 0); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { int cpu; for_each_cpu(cpu, mask) mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION); } void arch_send_call_function_single_ipi(int cpu) { mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); } void smp_timer_broadcast(const struct cpumask *mask) { int cpu; for_each_cpu(cpu, mask) mp_ops->send_ipi(cpu, SMP_MSG_TIMER); } static void ipi_timer(void) { irq_enter(); local_timer_interrupt(); irq_exit(); } void smp_message_recv(unsigned int msg) { switch (msg) { case SMP_MSG_FUNCTION: generic_smp_call_function_interrupt(); break; case SMP_MSG_RESCHEDULE: scheduler_ipi(); break; case SMP_MSG_FUNCTION_SINGLE: generic_smp_call_function_single_interrupt(); break; case SMP_MSG_TIMER: ipi_timer(); break; default: printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n", smp_processor_id(), __func__, msg); break; } } /* Not really SMP stuff ... */ int setup_profiling_timer(unsigned int multiplier) { return 0; } static void flush_tlb_all_ipi(void *info) { local_flush_tlb_all(); } void flush_tlb_all(void) { on_each_cpu(flush_tlb_all_ipi, 0, 1); } static void flush_tlb_mm_ipi(void *mm) { local_flush_tlb_mm((struct mm_struct *)mm); } /* * The following tlb flush calls are invoked when old translations are * being torn down, or pte attributes are changing. For single threaded * address spaces, a new context is obtained on the current cpu, and tlb * context on other cpus are invalidated to force a new context allocation * at switch_mm time, should the mm ever be used on other cpus. For * multithreaded address spaces, intercpu interrupts have to be sent. * Another case where intercpu interrupts are required is when the target * mm might be active on another cpu (eg debuggers doing the flushes on * behalf of debugees, kswapd stealing pages from another process etc). * Kanoj 07/00. 
*/ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); } else { int i; for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_mm(mm); preempt_enable(); } struct flush_tlb_data { struct vm_area_struct *vma; unsigned long addr1; unsigned long addr2; }; static void flush_tlb_range_ipi(void *info) { struct flush_tlb_data *fd = (struct flush_tlb_data *)info; local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; preempt_disable(); if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd; fd.vma = vma; fd.addr1 = start; fd.addr2 = end; smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); } else { int i; for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, mm) = 0; } local_flush_tlb_range(vma, start, end); preempt_enable(); } static void flush_tlb_kernel_range_ipi(void *info) { struct flush_tlb_data *fd = (struct flush_tlb_data *)info; local_flush_tlb_kernel_range(fd->addr1, fd->addr2); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { struct flush_tlb_data fd; fd.addr1 = start; fd.addr2 = end; on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); } static void flush_tlb_page_ipi(void *info) { struct flush_tlb_data *fd = (struct flush_tlb_data *)info; local_flush_tlb_page(fd->vma, fd->addr1); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { preempt_disable(); if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { struct flush_tlb_data fd; fd.vma = vma; fd.addr1 = page; smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); } else { int i; for_each_online_cpu(i) if (smp_processor_id() != i) cpu_context(i, vma->vm_mm) = 0; } local_flush_tlb_page(vma, page); 
preempt_enable(); } static void flush_tlb_one_ipi(void *info) { struct flush_tlb_data *fd = (struct flush_tlb_data *)info; local_flush_tlb_one(fd->addr1, fd->addr2); } void flush_tlb_one(unsigned long asid, unsigned long vaddr) { struct flush_tlb_data fd; fd.addr1 = asid; fd.addr2 = vaddr; smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); local_flush_tlb_one(asid, vaddr); }
gpl-2.0
ubuntu-chu/linux3.6.9-at91
net/ceph/auth_none.c
804
2963
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "auth_none.h" static void reset(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; xi->starting = true; xi->built_authorizer = false; } static void destroy(struct ceph_auth_client *ac) { kfree(ac->private); ac->private = NULL; } static int is_authenticated(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; return !xi->starting; } static int should_authenticate(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi = ac->private; return xi->starting; } /* * the generic auth code decode the global_id, and we carry no actual * authenticate state, so nothing happens here. */ static int handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_auth_none_info *xi = ac->private; xi->starting = false; return result; } /* * build an 'authorizer' with our entity_name and global_id. we can * reuse a single static copy since it is identical for all services * we connect to. 
*/ static int ceph_auth_none_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_auth_none_info *ai = ac->private; struct ceph_none_authorizer *au = &ai->au; void *p, *end; int ret; if (!ai->built_authorizer) { p = au->buf; end = p + sizeof(au->buf); ceph_encode_8(&p, 1); ret = ceph_entity_name_encode(ac->name, &p, end - 8); if (ret < 0) goto bad; ceph_decode_need(&p, end, sizeof(u64), bad2); ceph_encode_64(&p, ac->global_id); au->buf_len = p - (void *)au->buf; ai->built_authorizer = true; dout("built authorizer len %d\n", au->buf_len); } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf; auth->authorizer_buf_len = au->buf_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; bad2: ret = -ERANGE; bad: return ret; } static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { /* nothing to do */ } static const struct ceph_auth_client_ops ceph_auth_none_ops = { .name = "none", .reset = reset, .destroy = destroy, .is_authenticated = is_authenticated, .should_authenticate = should_authenticate, .handle_reply = handle_reply, .create_authorizer = ceph_auth_none_create_authorizer, .destroy_authorizer = ceph_auth_none_destroy_authorizer, }; int ceph_auth_none_init(struct ceph_auth_client *ac) { struct ceph_auth_none_info *xi; dout("ceph_auth_none_init %p\n", ac); xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) return -ENOMEM; xi->starting = true; xi->built_authorizer = false; ac->protocol = CEPH_AUTH_NONE; ac->private = xi; ac->ops = &ceph_auth_none_ops; return 0; }
gpl-2.0
AudioGod/Gods-Kernel-Huawei-Angler
drivers/mmc/host/dw_mmc-exynos.c
1828
5833
/* * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver * * Copyright (C) 2012, Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/dw_mmc.h> #include <linux/of.h> #include <linux/of_gpio.h> #include "dw_mmc.h" #include "dw_mmc-pltfm.h" #define NUM_PINS(x) (x + 2) #define SDMMC_CLKSEL 0x09C #define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0) #define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16) #define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24) #define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7) #define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ SDMMC_CLKSEL_CCLK_DRIVE(y) | \ SDMMC_CLKSEL_CCLK_DIVIDER(z)) #define SDMMC_CMD_USE_HOLD_REG BIT(29) #define EXYNOS4210_FIXED_CIU_CLK_DIV 2 #define EXYNOS4412_FIXED_CIU_CLK_DIV 4 /* Variations in Exynos specific dw-mshc controller */ enum dw_mci_exynos_type { DW_MCI_TYPE_EXYNOS4210, DW_MCI_TYPE_EXYNOS4412, DW_MCI_TYPE_EXYNOS5250, }; /* Exynos implementation specific driver private data */ struct dw_mci_exynos_priv_data { enum dw_mci_exynos_type ctrl_type; u8 ciu_div; u32 sdr_timing; u32 ddr_timing; }; static struct dw_mci_exynos_compatible { char *compatible; enum dw_mci_exynos_type ctrl_type; } exynos_compat[] = { { .compatible = "samsung,exynos4210-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS4210, }, { .compatible = "samsung,exynos4412-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS4412, }, { .compatible = "samsung,exynos5250-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS5250, }, }; static int dw_mci_exynos_priv_init(struct dw_mci *host) { struct dw_mci_exynos_priv_data *priv; int idx; priv = devm_kzalloc(host->dev, 
sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(host->dev, "mem alloc failed for private data\n"); return -ENOMEM; } for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { if (of_device_is_compatible(host->dev->of_node, exynos_compat[idx].compatible)) priv->ctrl_type = exynos_compat[idx].ctrl_type; } host->priv = priv; return 0; } static int dw_mci_exynos_setup_clock(struct dw_mci *host) { struct dw_mci_exynos_priv_data *priv = host->priv; if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5250) host->bus_hz /= (priv->ciu_div + 1); else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) host->bus_hz /= EXYNOS4412_FIXED_CIU_CLK_DIV; else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) host->bus_hz /= EXYNOS4210_FIXED_CIU_CLK_DIV; return 0; } static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) { /* * Exynos4412 and Exynos5250 extends the use of CMD register with the * use of bit 29 (which is reserved on standard MSHC controllers) for * optionally bypassing the HOLD register for command and data. The * HOLD register should be bypassed in case there is no phase shift * applied on CMD/DATA that is sent to the card. 
*/ if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) *cmdr |= SDMMC_CMD_USE_HOLD_REG; } static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) { struct dw_mci_exynos_priv_data *priv = host->priv; if (ios->timing == MMC_TIMING_UHS_DDR50) mci_writel(host, CLKSEL, priv->ddr_timing); else mci_writel(host, CLKSEL, priv->sdr_timing); } static int dw_mci_exynos_parse_dt(struct dw_mci *host) { struct dw_mci_exynos_priv_data *priv = host->priv; struct device_node *np = host->dev->of_node; u32 timing[2]; u32 div = 0; int ret; of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); priv->ciu_div = div; ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr-timing", timing, 2); if (ret) return ret; priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); ret = of_property_read_u32_array(np, "samsung,dw-mshc-ddr-timing", timing, 2); if (ret) return ret; priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); return 0; } /* Common capabilities of Exynos4/Exynos5 SoC */ static unsigned long exynos_dwmmc_caps[4] = { MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, MMC_CAP_CMD23, MMC_CAP_CMD23, MMC_CAP_CMD23, }; static const struct dw_mci_drv_data exynos_drv_data = { .caps = exynos_dwmmc_caps, .init = dw_mci_exynos_priv_init, .setup_clock = dw_mci_exynos_setup_clock, .prepare_command = dw_mci_exynos_prepare_command, .set_ios = dw_mci_exynos_set_ios, .parse_dt = dw_mci_exynos_parse_dt, }; static const struct of_device_id dw_mci_exynos_match[] = { { .compatible = "samsung,exynos4412-dw-mshc", .data = &exynos_drv_data, }, { .compatible = "samsung,exynos5250-dw-mshc", .data = &exynos_drv_data, }, {}, }; MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); static int dw_mci_exynos_probe(struct platform_device *pdev) { const struct dw_mci_drv_data *drv_data; const struct of_device_id *match; match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); drv_data = match->data; return dw_mci_pltfm_register(pdev, 
drv_data); } static struct platform_driver dw_mci_exynos_pltfm_driver = { .probe = dw_mci_exynos_probe, .remove = __exit_p(dw_mci_pltfm_remove), .driver = { .name = "dwmmc_exynos", .of_match_table = dw_mci_exynos_match, .pm = &dw_mci_pltfm_pmops, }, }; module_platform_driver(dw_mci_exynos_pltfm_driver); MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:dwmmc-exynos");
gpl-2.0
kyupltd/linux
arch/m68k/platform/68000/timers.c
2084
3501
/***************************************************************************/ /* * timers.c - Generic hardware timer support. * * Copyright (C) 1993 Hamish Macdonald * Copyright (C) 1999 D. Jeff Dionne * Copyright (C) 2001 Georges Menie, Ken Desmet * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /***************************************************************************/ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/clocksource.h> #include <linux/rtc.h> #include <asm/setup.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/MC68VZ328.h> /***************************************************************************/ #if defined(CONFIG_DRAGEN2) /* with a 33.16 MHz clock, this will give usec resolution to the time functions */ #define CLOCK_SOURCE TCTL_CLKSOURCE_SYSCLK #define CLOCK_PRE 7 #define TICKS_PER_JIFFY 41450 #elif defined(CONFIG_XCOPILOT_BUGS) /* * The only thing I know is that CLK32 is not available on Xcopilot * I have little idea about what frequency SYSCLK has on Xcopilot. 
* The values for prescaler and compare registers were simply * taken from the original source */ #define CLOCK_SOURCE TCTL_CLKSOURCE_SYSCLK #define CLOCK_PRE 2 #define TICKS_PER_JIFFY 0xd7e4 #else /* default to using the 32Khz clock */ #define CLOCK_SOURCE TCTL_CLKSOURCE_32KHZ #define CLOCK_PRE 31 #define TICKS_PER_JIFFY 10 #endif static u32 m68328_tick_cnt; static irq_handler_t timer_interrupt; /***************************************************************************/ static irqreturn_t hw_tick(int irq, void *dummy) { /* Reset Timer1 */ TSTAT &= 0; m68328_tick_cnt += TICKS_PER_JIFFY; return timer_interrupt(irq, dummy); } /***************************************************************************/ static struct irqaction m68328_timer_irq = { .name = "timer", .flags = IRQF_TIMER, .handler = hw_tick, }; /***************************************************************************/ static cycle_t m68328_read_clk(struct clocksource *cs) { unsigned long flags; u32 cycles; local_irq_save(flags); cycles = m68328_tick_cnt + TCN; local_irq_restore(flags); return cycles; } /***************************************************************************/ static struct clocksource m68328_clk = { .name = "timer", .rating = 250, .read = m68328_read_clk, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /***************************************************************************/ void hw_timer_init(irq_handler_t handler) { /* disable timer 1 */ TCTL = 0; /* set ISR */ setup_irq(TMR_IRQ_NUM, &m68328_timer_irq); /* Restart mode, Enable int, Set clock source */ TCTL = TCTL_OM | TCTL_IRQEN | CLOCK_SOURCE; TPRER = CLOCK_PRE; TCMP = TICKS_PER_JIFFY; /* Enable timer 1 */ TCTL |= TCTL_TEN; clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); timer_interrupt = handler; } /***************************************************************************/ int m68328_hwclk(int set, struct rtc_time *t) { if (!set) { long now = RTCTIME; t->tm_year = t->tm_mon = t->tm_mday = 
1; t->tm_hour = (now >> 24) % 24; t->tm_min = (now >> 16) % 60; t->tm_sec = now % 60; } return 0; } /***************************************************************************/
gpl-2.0
deadman96385/android_kernel_asus_Z00A
drivers/rtc/rtc-vt8500.c
2084
8760
/* * drivers/rtc/rtc-vt8500.c * * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> * * Based on rtc-pxa.c * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/bcd.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> /* * Register definitions */ #define VT8500_RTC_TS 0x00 /* Time set */ #define VT8500_RTC_DS 0x04 /* Date set */ #define VT8500_RTC_AS 0x08 /* Alarm set */ #define VT8500_RTC_CR 0x0c /* Control */ #define VT8500_RTC_TR 0x10 /* Time read */ #define VT8500_RTC_DR 0x14 /* Date read */ #define VT8500_RTC_WS 0x18 /* Write status */ #define VT8500_RTC_CL 0x20 /* Calibration */ #define VT8500_RTC_IS 0x24 /* Interrupt status */ #define VT8500_RTC_ST 0x28 /* Status */ #define INVALID_TIME_BIT (1 << 31) #define DATE_CENTURY_S 19 #define DATE_YEAR_S 11 #define DATE_YEAR_MASK (0xff << DATE_YEAR_S) #define DATE_MONTH_S 6 #define DATE_MONTH_MASK (0x1f << DATE_MONTH_S) #define DATE_DAY_MASK 0x3f #define TIME_DOW_S 20 #define TIME_DOW_MASK (0x07 << TIME_DOW_S) #define TIME_HOUR_S 14 #define TIME_HOUR_MASK (0x3f << TIME_HOUR_S) #define TIME_MIN_S 7 #define TIME_MIN_MASK (0x7f << TIME_MIN_S) #define TIME_SEC_MASK 0x7f #define ALARM_DAY_S 20 #define ALARM_DAY_MASK (0x3f << ALARM_DAY_S) #define ALARM_DAY_BIT (1 << 29) #define ALARM_HOUR_BIT (1 << 28) #define ALARM_MIN_BIT (1 << 27) #define ALARM_SEC_BIT (1 << 26) #define ALARM_ENABLE_MASK (ALARM_DAY_BIT \ | ALARM_HOUR_BIT \ | ALARM_MIN_BIT \ | ALARM_SEC_BIT) 
#define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ #define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ #define VT8500_RTC_IS_ALARM (1 << 0) /* Alarm interrupt status */ struct vt8500_rtc { void __iomem *regbase; struct resource *res; int irq_alarm; struct rtc_device *rtc; spinlock_t lock; /* Protects this structure */ }; static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id) { struct vt8500_rtc *vt8500_rtc = dev_id; u32 isr; unsigned long events = 0; spin_lock(&vt8500_rtc->lock); /* clear interrupt sources */ isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS); spin_unlock(&vt8500_rtc->lock); if (isr & VT8500_RTC_IS_ALARM) events |= RTC_AF | RTC_IRQF; rtc_update_irq(vt8500_rtc->rtc, 1, events); return IRQ_HANDLED; } static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); u32 date, time; date = readl(vt8500_rtc->regbase + VT8500_RTC_DR); time = readl(vt8500_rtc->regbase + VT8500_RTC_TR); tm->tm_sec = bcd2bin(time & TIME_SEC_MASK); tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1; tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + ((date >> DATE_CENTURY_S) & 1 ? 
200 : 100); tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; return 0; } static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); if (tm->tm_year < 100) { dev_warn(dev, "Only years 2000-2199 are supported by the " "hardware!\n"); return -EINVAL; } writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S) | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) | (bin2bcd(tm->tm_mday)) | ((tm->tm_year >= 200) << DATE_CENTURY_S), vt8500_rtc->regbase + VT8500_RTC_DS); writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) | (bin2bcd(tm->tm_min) << TIME_MIN_S) | (bin2bcd(tm->tm_sec)), vt8500_rtc->regbase + VT8500_RTC_TS); return 0; } static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); u32 isr, alarm; alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS); isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S); alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S); alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S); alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK)); alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0; alrm->pending = (isr & VT8500_RTC_IS_ALARM) ? 1 : 0; return rtc_valid_tm(&alrm->time); } static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); writel((alrm->enabled ? 
ALARM_ENABLE_MASK : 0) | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S) | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S) | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S) | (bin2bcd(alrm->time.tm_sec)), vt8500_rtc->regbase + VT8500_RTC_AS); return 0; } static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS); if (enabled) tmp |= ALARM_ENABLE_MASK; else tmp &= ~ALARM_ENABLE_MASK; writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS); return 0; } static const struct rtc_class_ops vt8500_rtc_ops = { .read_time = vt8500_rtc_read_time, .set_time = vt8500_rtc_set_time, .read_alarm = vt8500_rtc_read_alarm, .set_alarm = vt8500_rtc_set_alarm, .alarm_irq_enable = vt8500_alarm_irq_enable, }; static int vt8500_rtc_probe(struct platform_device *pdev) { struct vt8500_rtc *vt8500_rtc; int ret; vt8500_rtc = devm_kzalloc(&pdev->dev, sizeof(struct vt8500_rtc), GFP_KERNEL); if (!vt8500_rtc) return -ENOMEM; spin_lock_init(&vt8500_rtc->lock); platform_set_drvdata(pdev, vt8500_rtc); vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!vt8500_rtc->res) { dev_err(&pdev->dev, "No I/O memory resource defined\n"); return -ENXIO; } vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0); if (vt8500_rtc->irq_alarm < 0) { dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); return -ENXIO; } vt8500_rtc->res = devm_request_mem_region(&pdev->dev, vt8500_rtc->res->start, resource_size(vt8500_rtc->res), "vt8500-rtc"); if (vt8500_rtc->res == NULL) { dev_err(&pdev->dev, "failed to request I/O memory\n"); return -EBUSY; } vt8500_rtc->regbase = devm_ioremap(&pdev->dev, vt8500_rtc->res->start, resource_size(vt8500_rtc->res)); if (!vt8500_rtc->regbase) { dev_err(&pdev->dev, "Unable to map RTC I/O memory\n"); ret = -EBUSY; goto err_return; } /* Enable RTC and set it to 24-hour mode */ writel(VT8500_RTC_CR_ENABLE, vt8500_rtc->regbase + VT8500_RTC_CR); vt8500_rtc->rtc = 
devm_rtc_device_register(&pdev->dev, "vt8500-rtc", &vt8500_rtc_ops, THIS_MODULE); if (IS_ERR(vt8500_rtc->rtc)) { ret = PTR_ERR(vt8500_rtc->rtc); dev_err(&pdev->dev, "Failed to register RTC device -> %d\n", ret); goto err_return; } ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc); if (ret < 0) { dev_err(&pdev->dev, "can't get irq %i, err %d\n", vt8500_rtc->irq_alarm, ret); goto err_return; } return 0; err_return: return ret; } static int vt8500_rtc_remove(struct platform_device *pdev) { struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); /* Disable alarm matching */ writel(0, vt8500_rtc->regbase + VT8500_RTC_IS); platform_set_drvdata(pdev, NULL); return 0; } static const struct of_device_id wmt_dt_ids[] = { { .compatible = "via,vt8500-rtc", }, {} }; static struct platform_driver vt8500_rtc_driver = { .probe = vt8500_rtc_probe, .remove = vt8500_rtc_remove, .driver = { .name = "vt8500-rtc", .owner = THIS_MODULE, .of_match_table = of_match_ptr(wmt_dt_ids), }, }; module_platform_driver(vt8500_rtc_driver); MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:vt8500-rtc");
gpl-2.0
aniketroxx/Phantocivic_Nicki
drivers/mfd/pm8821-irq.c
3364
10543
/* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/export.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/mfd/pm8xxx/core.h> #include <linux/mfd/pm8xxx/pm8821-irq.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <mach/mpm.h> #define PM8821_TOTAL_IRQ_MASTERS 2 #define PM8821_BLOCKS_PER_MASTER 7 #define PM8821_IRQ_MASTER1_SET 0x01 #define PM8821_IRQ_CLEAR_OFFSET 0x01 #define PM8821_IRQ_RT_STATUS_OFFSET 0x0F #define PM8821_IRQ_MASK_REG_OFFSET 0x08 #define SSBI_REG_ADDR_IRQ_MASTER0 0x30 #define SSBI_REG_ADDR_IRQ_MASTER1 0xB0 #define MPM_PIN_FOR_8821_IRQ 7 #define SSBI_REG_ADDR_IRQ_IT_STATUS(master_base, block) (master_base + block) /* * Block 0 does not exist in PM8821 IRQ SSBI address space, * IRQ0 is assigned to bit0 of block1. 
*/ #define SSBI_REG_ADDR_IRQ_IT_CLEAR(master_base, block) \ (master_base + PM8821_IRQ_CLEAR_OFFSET + block) #define SSBI_REG_ADDR_IRQ_RT_STATUS(master_base, block) \ (master_base + PM8821_IRQ_RT_STATUS_OFFSET + block) #define SSBI_REG_ADDR_IRQ_MASK(master_base, block) \ (master_base + PM8821_IRQ_MASK_REG_OFFSET + block) struct pm_irq_chip { struct device *dev; spinlock_t pm_irq_lock; unsigned int base_addr; unsigned int devirq; unsigned int irq_base; unsigned int num_irqs; int masters[PM8821_TOTAL_IRQ_MASTERS]; }; static int pm8821_irq_masked_write(struct pm_irq_chip *chip, u16 addr, u8 mask, u8 val) { int rc; u8 reg; rc = pm8xxx_readb(chip->dev, addr, &reg); if (rc) { pr_err("read failed addr = %03X, rc = %d\n", addr, rc); return rc; } reg &= ~mask; reg |= val & mask; rc = pm8xxx_writeb(chip->dev, addr, reg); if (rc) { pr_err("write failed addr = %03X, rc = %d\n", addr, rc); return rc; } return 0; } static int pm8821_read_master_irq(const struct pm_irq_chip *chip, int m, u8 *master) { return pm8xxx_readb(chip->dev, chip->masters[m], master); } static int pm8821_read_block_irq(struct pm_irq_chip *chip, int master, u8 block, u8 *bits) { int rc; spin_lock(&chip->pm_irq_lock); rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS(chip->masters[master], block), bits); if (rc) pr_err("Failed Reading Status rc=%d\n", rc); spin_unlock(&chip->pm_irq_lock); return rc; } static int pm8821_irq_block_handler(struct pm_irq_chip *chip, int master_number, int block) { int pmirq, irq, i, ret; u8 bits; ret = pm8821_read_block_irq(chip, master_number, block, &bits); if (ret) { pr_err("Failed reading %d block ret=%d", block, ret); return ret; } if (!bits) { pr_err("block bit set in master but no irqs: %d", block); return 0; } /* Convert block offset to global block number */ block += (master_number * PM8821_BLOCKS_PER_MASTER) - 1; /* Check IRQ bits */ for (i = 0; i < 8; i++) { if (bits & BIT(i)) { pmirq = (block << 3) + i; irq = pmirq + chip->irq_base; generic_handle_irq(irq); } } 
return 0; } static int pm8821_irq_read_master(struct pm_irq_chip *chip, int master_number, u8 master_val) { int ret = 0; int block; for (block = 1; block < 8; block++) { if (master_val & BIT(block)) { ret |= pm8821_irq_block_handler(chip, master_number, block); } } return ret; } static irqreturn_t pm8821_irq_handler(int irq, void *data) { struct pm_irq_chip *chip = data; int ret; u8 master; ret = pm8821_read_master_irq(chip, 0, &master); if (ret) { pr_err("Failed to read master 0 ret=%d\n", ret); return ret; } if (master & ~PM8821_IRQ_MASTER1_SET) pm8821_irq_read_master(chip, 0, master); if (!(master & PM8821_IRQ_MASTER1_SET)) goto done; ret = pm8821_read_master_irq(chip, 1, &master); if (ret) { pr_err("Failed to read master 1 ret=%d\n", ret); return ret; } pm8821_irq_read_master(chip, 1, master); done: return IRQ_HANDLED; } static void pm8821_irq_mask(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); unsigned int pmirq = d->irq - chip->irq_base; int irq_bit, rc; u8 block, master; block = pmirq >> 3; master = block / PM8821_BLOCKS_PER_MASTER; irq_bit = pmirq % 8; block %= PM8821_BLOCKS_PER_MASTER; spin_lock(&chip->pm_irq_lock); rc = pm8821_irq_masked_write(chip, SSBI_REG_ADDR_IRQ_MASK(chip->masters[master], block), BIT(irq_bit), BIT(irq_bit)); if (rc) pr_err("Failed to read/write mask IRQ:%d rc=%d\n", pmirq, rc); spin_unlock(&chip->pm_irq_lock); } static void pm8821_irq_mask_ack(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); unsigned int pmirq = d->irq - chip->irq_base; int irq_bit, rc; u8 block, master; block = pmirq >> 3; master = block / PM8821_BLOCKS_PER_MASTER; irq_bit = pmirq % 8; block %= PM8821_BLOCKS_PER_MASTER; spin_lock(&chip->pm_irq_lock); rc = pm8821_irq_masked_write(chip, SSBI_REG_ADDR_IRQ_MASK(chip->masters[master], block), BIT(irq_bit), BIT(irq_bit)); if (rc) { pr_err("Failed to read/write mask IRQ:%d rc=%d\n", pmirq, rc); goto fail; } rc = pm8821_irq_masked_write(chip, 
SSBI_REG_ADDR_IRQ_IT_CLEAR(chip->masters[master], block), BIT(irq_bit), BIT(irq_bit)); if (rc) { pr_err("Failed to read/write IT_CLEAR IRQ:%d rc=%d\n", pmirq, rc); } fail: spin_unlock(&chip->pm_irq_lock); } static void pm8821_irq_unmask(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); unsigned int pmirq = d->irq - chip->irq_base; int irq_bit, rc; u8 block, master; block = pmirq >> 3; master = block / PM8821_BLOCKS_PER_MASTER; irq_bit = pmirq % 8; block %= PM8821_BLOCKS_PER_MASTER; spin_lock(&chip->pm_irq_lock); rc = pm8821_irq_masked_write(chip, SSBI_REG_ADDR_IRQ_MASK(chip->masters[master], block), BIT(irq_bit), ~BIT(irq_bit)); if (rc) pr_err("Failed to read/write unmask IRQ:%d rc=%d\n", pmirq, rc); spin_unlock(&chip->pm_irq_lock); } static int pm8821_irq_set_type(struct irq_data *d, unsigned int flow_type) { /* * PM8821 IRQ controller does not have explicit software support for * IRQ flow type. */ return 0; } static int pm8821_irq_set_wake(struct irq_data *d, unsigned int on) { return 0; } static int pm8821_irq_read_line(struct irq_data *d) { struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); return pm8821_get_irq_stat(chip, d->irq); } static struct irq_chip pm_irq_chip = { .name = "pm8821-irq", .irq_mask = pm8821_irq_mask, .irq_mask_ack = pm8821_irq_mask_ack, .irq_unmask = pm8821_irq_unmask, .irq_set_type = pm8821_irq_set_type, .irq_set_wake = pm8821_irq_set_wake, .irq_read_line = pm8821_irq_read_line, .flags = IRQCHIP_MASK_ON_SUSPEND, }; /** * pm8821_get_irq_stat - get the status of the irq line * @chip: pointer to identify a pmic irq controller * @irq: the irq number * * The pm8821 gpio and mpp rely on the interrupt block to read * the values on their pins. This function is to facilitate reading * the status of a gpio or an mpp line. The caller has to convert the * gpio number to irq number. 
* * RETURNS: * an int indicating the value read on that line */ int pm8821_get_irq_stat(struct pm_irq_chip *chip, int irq) { int pmirq, rc; u8 block, bits, bit, master; unsigned long flags; if (chip == NULL || irq < chip->irq_base || irq >= chip->irq_base + chip->num_irqs) return -EINVAL; pmirq = irq - chip->irq_base; block = pmirq >> 3; master = block / PM8821_BLOCKS_PER_MASTER; bit = pmirq % 8; block %= PM8821_BLOCKS_PER_MASTER; spin_lock_irqsave(&chip->pm_irq_lock, flags); rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS(chip->masters[master], block), &bits); if (rc) { pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n", irq, pmirq, block, rc); goto bail_out; } rc = (bits & BIT(bit)) ? 1 : 0; bail_out: spin_unlock_irqrestore(&chip->pm_irq_lock, flags); return rc; } EXPORT_SYMBOL_GPL(pm8821_get_irq_stat); struct pm_irq_chip * __devinit pm8821_irq_init(struct device *dev, const struct pm8xxx_irq_platform_data *pdata) { struct pm_irq_chip *chip; int devirq, rc, blocks, masters; unsigned int pmirq; if (!pdata) { pr_err("No platform data\n"); return ERR_PTR(-EINVAL); } devirq = pdata->devirq; if (devirq < 0) { pr_err("missing devirq\n"); rc = devirq; return ERR_PTR(-EINVAL); } chip = kzalloc(sizeof(struct pm_irq_chip) + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL); if (!chip) { pr_err("Cannot alloc pm_irq_chip struct\n"); return ERR_PTR(-EINVAL); } chip->dev = dev; chip->devirq = devirq; chip->irq_base = pdata->irq_base; chip->num_irqs = pdata->irq_cdata.nirqs; chip->base_addr = pdata->irq_cdata.base_addr; blocks = DIV_ROUND_UP(pdata->irq_cdata.nirqs, 8); masters = DIV_ROUND_UP(blocks, PM8821_BLOCKS_PER_MASTER); chip->masters[0] = chip->base_addr + SSBI_REG_ADDR_IRQ_MASTER0; chip->masters[1] = chip->base_addr + SSBI_REG_ADDR_IRQ_MASTER1; if (masters != PM8821_TOTAL_IRQ_MASTERS) { pr_err("Unequal number of masters, passed: %d, " "should have been: %d\n", masters, PM8821_TOTAL_IRQ_MASTERS); kfree(chip); return ERR_PTR(-EINVAL); } 
spin_lock_init(&chip->pm_irq_lock); for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) { irq_set_chip_and_handler(chip->irq_base + pmirq, &pm_irq_chip, handle_level_irq); irq_set_chip_data(chip->irq_base + pmirq, chip); #ifdef CONFIG_ARM set_irq_flags(chip->irq_base + pmirq, IRQF_VALID); #else irq_set_noprobe(chip->irq_base + pmirq); #endif } if (devirq != 0) { rc = request_irq(devirq, pm8821_irq_handler, pdata->irq_trigger_flag, "pm8821_sec_irq", chip); if (rc) { pr_err("failed to request_irq for %d rc=%d\n", devirq, rc); kfree(chip); return ERR_PTR(rc); } else{ irq_set_irq_wake(devirq, 1); msm_mpm_set_pin_wake(MPM_PIN_FOR_8821_IRQ, 1); msm_mpm_set_pin_type(MPM_PIN_FOR_8821_IRQ, pdata->irq_trigger_flag); } } return chip; } int pm8821_irq_exit(struct pm_irq_chip *chip) { irq_set_chained_handler(chip->devirq, NULL); kfree(chip); return 0; }
gpl-2.0
windxixi/OptiWiz-Kernel-F200-JB
sound/soc/samsung/lowland.c
4900
5682
/*
 * Lowland audio support
 *
 * Copyright 2011 Wolfson Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/jack.h>
#include <linux/gpio.h>
#include <linux/module.h>

#include "../codecs/wm5100.h"
#include "../codecs/wm9081.h"

/* Clock rates for a 44.1kHz-based system */
#define MCLK1_RATE (44100 * 512)
#define CLKOUT_RATE (44100 * 256)

/*
 * Configure both ends of the active DAI link for I2S, normal bit/frame
 * clock polarity, with the codec as bit/frame clock master.
 */
static int lowland_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	const unsigned int fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
				 SND_SOC_DAIFMT_CBM_CFM;
	int rc;

	/* Codec side first, then the CPU side, as both must agree */
	rc = snd_soc_dai_set_fmt(rtd->codec_dai, fmt);
	if (rc < 0)
		return rc;

	rc = snd_soc_dai_set_fmt(rtd->cpu_dai, fmt);
	if (rc < 0)
		return rc;

	return 0;
}

static struct snd_soc_ops lowland_ops = {
	.hw_params = lowland_hw_params,
};

static struct snd_soc_jack lowland_headset;

/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin lowland_headset_pins[] = {
	{
		.pin = "Headphone",
		.mask = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
	},
	{
		.pin = "Headset Mic",
		.mask = SND_JACK_MICROPHONE,
	},
};

/*
 * One-time setup for the WM5100 CODEC: route its system clock from
 * MCLK1, start OPCLK for the other audio components, and register the
 * headset jack with the codec's accessory detection.
 */
static int lowland_wm5100_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	int rc;

	rc = snd_soc_codec_set_sysclk(codec, WM5100_CLK_SYSCLK,
				      WM5100_CLKSRC_MCLK1, MCLK1_RATE,
				      SND_SOC_CLOCK_IN);
	if (rc < 0) {
		pr_err("Failed to set SYSCLK clock source: %d\n", rc);
		return rc;
	}

	/* Clock OPCLK, used by the other audio components. */
	rc = snd_soc_codec_set_sysclk(codec, WM5100_CLK_OPCLK, 0,
				      CLKOUT_RATE, 0);
	if (rc < 0) {
		pr_err("Failed to set OPCLK rate: %d\n", rc);
		return rc;
	}

	rc = snd_soc_jack_new(codec, "Headset",
			      SND_JACK_LINEOUT | SND_JACK_HEADSET |
			      SND_JACK_BTN_0, &lowland_headset);
	if (rc)
		return rc;

	rc = snd_soc_jack_add_pins(&lowland_headset,
				   ARRAY_SIZE(lowland_headset_pins),
				   lowland_headset_pins);
	if (rc)
		return rc;

	wm5100_detect(codec, &lowland_headset);

	return 0;
}

static struct snd_soc_dai_link lowland_dai[] = {
	{
		.name = "CPU",
		.stream_name = "CPU",
		.cpu_dai_name = "samsung-i2s.0",
		.codec_dai_name = "wm5100-aif1",
		.platform_name = "samsung-audio",
		.codec_name = "wm5100.1-001a",
		.ops = &lowland_ops,
		.init = lowland_wm5100_init,
	},
	{
		.name = "Baseband",
		.stream_name = "Baseband",
		.cpu_dai_name = "wm5100-aif2",
		.codec_dai_name = "wm1250-ev1",
		.codec_name = "wm1250-ev1.1-0027",
		.ops = &lowland_ops,
		.ignore_suspend = 1,
	},
};

/* Aux setup for the WM9081 sub speaker amp */
static int lowland_wm9081_init(struct snd_soc_dapm_context *dapm)
{
	/* Mark the amp's LINEOUT pin as not connected */
	snd_soc_dapm_nc_pin(dapm, "LINEOUT");

	/* At any time the WM9081 is active it will have this clock */
	return snd_soc_codec_set_sysclk(dapm->codec, WM9081_SYSCLK_MCLK, 0,
					CLKOUT_RATE, 0);
}

static struct snd_soc_aux_dev lowland_aux_dev[] = {
	{
		.name = "wm9081",
		.codec_name = "wm9081.1-006c",
		.init = lowland_wm9081_init,
	},
};

static struct snd_soc_codec_conf lowland_codec_conf[] = {
	{
		.dev_name = "wm9081.1-006c",
		.name_prefix = "Sub",
	},
};

static const struct snd_kcontrol_new controls[] = {
	SOC_DAPM_PIN_SWITCH("Main Speaker"),
	SOC_DAPM_PIN_SWITCH("Main DMIC"),
	SOC_DAPM_PIN_SWITCH("Main AMIC"),
	SOC_DAPM_PIN_SWITCH("WM1250 Input"),
	SOC_DAPM_PIN_SWITCH("WM1250 Output"),
	SOC_DAPM_PIN_SWITCH("Headphone"),
};

static struct snd_soc_dapm_widget widgets[] = {
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),

	SND_SOC_DAPM_SPK("Main Speaker", NULL),

	SND_SOC_DAPM_MIC("Main AMIC", NULL),
	SND_SOC_DAPM_MIC("Main DMIC", NULL),
};

static struct snd_soc_dapm_route audio_paths[] = {
	{ "Sub IN1", NULL, "HPOUT2L" },
	{ "Sub IN2", NULL, "HPOUT2R" },

	{ "Main Speaker", NULL, "Sub SPKN" },
	{ "Main Speaker", NULL, "Sub SPKP" },
	{ "Main Speaker", NULL, "SPKDAT1" },
};

static struct snd_soc_card lowland = {
	.name = "Lowland",
	.owner = THIS_MODULE,
	.dai_link = lowland_dai,
	.num_links = ARRAY_SIZE(lowland_dai),
	.aux_dev = lowland_aux_dev,
	.num_aux_devs = ARRAY_SIZE(lowland_aux_dev),
	.codec_conf = lowland_codec_conf,
	.num_configs = ARRAY_SIZE(lowland_codec_conf),

	.controls = controls,
	.num_controls = ARRAY_SIZE(controls),
	.dapm_widgets = widgets,
	.num_dapm_widgets = ARRAY_SIZE(widgets),
	.dapm_routes = audio_paths,
	.num_dapm_routes = ARRAY_SIZE(audio_paths),
};

static __devinit int lowland_probe(struct platform_device *pdev)
{
	int rc;

	lowland.dev = &pdev->dev;

	rc = snd_soc_register_card(&lowland);
	if (rc)
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			rc);

	return rc;
}

static int __devexit lowland_remove(struct platform_device *pdev)
{
	snd_soc_unregister_card(platform_get_drvdata(pdev));

	return 0;
}

static struct platform_driver lowland_driver = {
	.driver = {
		.name = "lowland",
		.owner = THIS_MODULE,
		.pm = &snd_soc_pm_ops,
	},
	.probe = lowland_probe,
	.remove = __devexit_p(lowland_remove),
};

module_platform_driver(lowland_driver);

MODULE_DESCRIPTION("Lowland audio support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lowland");
gpl-2.0
csmanjuvijay/usb-next
drivers/net/wireless/ath/key.c
4900
17356
/*
 * Copyright (c) 2009 Atheros Communications Inc.
 * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>

#include "ath.h"
#include "reg.h"

/*
 * Register access goes through the driver-supplied ops in 'common'; these
 * macros expect a local variable named 'common' in the calling scope.
 * Note REG_WRITE passes (_ah, _val, _reg) — argument order is swapped
 * relative to the macro's own parameter list.
 */
#define REG_READ			(common->ops->read)
#define REG_WRITE(_ah, _reg, _val)	(common->ops->write)(_ah, _val, _reg)
#define ENABLE_REGWRITE_BUFFER(_ah)			\
	if (common->ops->enable_write_buffer)		\
		common->ops->enable_write_buffer((_ah));

#define REGWRITE_BUFFER_FLUSH(_ah)			\
	if (common->ops->write_flush)			\
		common->ops->write_flush((_ah));

#define IEEE80211_WEP_NKID	4	/* number of key ids */

/************************/
/* Key Cache Management */
/************************/

/*
 * ath_hw_keyreset - wipe a hardware key cache entry
 *
 * Clears the key material, type and MAC registers for @entry.  If the
 * entry currently holds a TKIP key, the paired MIC entry at @entry + 64
 * is cleared as well (its type register only on combined-MIC hardware).
 * Returns false if @entry is out of range, true otherwise.
 */
bool ath_hw_keyreset(struct ath_common *common, u16 entry)
{
	u32 keyType;
	void *ah = common->ah;

	if (entry >= common->keymax) {
		ath_err(common, "keyreset: keycache entry %u out of range\n",
			entry);
		return false;
	}

	keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
	REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
	REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);

	if (keyType == AR_KEYTABLE_TYPE_TKIP) {
		u16 micentry = entry + 64;

		REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
		REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
		REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
		if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
			REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
			REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
				  AR_KEYTABLE_TYPE_CLR);
		}
	}

	REGWRITE_BUFFER_FLUSH(ah);

	return true;
}
EXPORT_SYMBOL(ath_hw_keyreset);

/*
 * ath_hw_keysetmac - program the peer MAC address for a key cache entry
 *
 * The 48-bit address is stored right-shifted by one bit across the
 * MAC0/MAC1 registers.  A NULL @mac clears the address.  Returns false
 * if @entry is out of range.
 */
static bool ath_hw_keysetmac(struct ath_common *common,
			     u16 entry, const u8 *mac)
{
	u32 macHi, macLo;
	u32 unicast_flag = AR_KEYTABLE_VALID;
	void *ah = common->ah;

	if (entry >= common->keymax) {
		ath_err(common, "keysetmac: keycache entry %u out of range\n",
			entry);
		return false;
	}

	if (mac != NULL) {
		/*
		 * AR_KEYTABLE_VALID indicates that the address is a unicast
		 * address, which must match the transmitter address for
		 * decrypting frames.
		 * Not setting this bit allows the hardware to use the key
		 * for multicast frame decryption.
		 */
		if (mac[0] & 0x01)
			unicast_flag = 0;

		macLo = get_unaligned_le32(mac);
		macHi = get_unaligned_le16(mac + 4);
		/* Pack the address >> 1 into the two registers */
		macLo >>= 1;
		macLo |= (macHi & 1) << 31;
		macHi >>= 1;
	} else {
		macLo = macHi = 0;
	}
	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
	REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);

	REGWRITE_BUFFER_FLUSH(ah);

	return true;
}

/*
 * ath_hw_set_keycache_entry - program key material into the key cache
 *
 * Writes the key in @k (and, for TKIP, the associated MIC key(s)) into
 * hardware entry @entry, and programs @mac as the peer address.  The
 * exact register write order below is mandated by the hardware (see the
 * inline comments); do not reorder.  Returns false on invalid entry,
 * unsupported cipher, or bad key length.
 */
static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
				      const struct ath_keyval *k,
				      const u8 *mac)
{
	void *ah = common->ah;
	u32 key0, key1, key2, key3, key4;
	u32 keyType;

	if (entry >= common->keymax) {
		ath_err(common, "set-entry: keycache entry %u out of range\n",
			entry);
		return false;
	}

	/* Map the ath cipher to the hardware key table type */
	switch (k->kv_type) {
	case ATH_CIPHER_AES_OCB:
		keyType = AR_KEYTABLE_TYPE_AES;
		break;
	case ATH_CIPHER_AES_CCM:
		if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
			ath_dbg(common, ANY,
				"AES-CCM not supported by this mac rev\n");
			return false;
		}
		keyType = AR_KEYTABLE_TYPE_CCM;
		break;
	case ATH_CIPHER_TKIP:
		keyType = AR_KEYTABLE_TYPE_TKIP;
		/* TKIP needs the MIC entry at entry + 64 to exist too */
		if (entry + 64 >= common->keymax) {
			ath_dbg(common, ANY,
				"entry %u inappropriate for TKIP\n", entry);
			return false;
		}
		break;
	case ATH_CIPHER_WEP:
		if (k->kv_len < WLAN_KEY_LEN_WEP40) {
			ath_dbg(common, ANY, "WEP key length %u too small\n",
				k->kv_len);
			return false;
		}
		if (k->kv_len <= WLAN_KEY_LEN_WEP40)
			keyType = AR_KEYTABLE_TYPE_40;
		else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
			keyType = AR_KEYTABLE_TYPE_104;
		else
			keyType = AR_KEYTABLE_TYPE_128;
		break;
	case ATH_CIPHER_CLR:
		keyType = AR_KEYTABLE_TYPE_CLR;
		break;
	default:
		ath_err(common, "cipher %u not supported\n", k->kv_type);
		return false;
	}

	/* Split the 128-bit key across the five 32/16-bit key registers */
	key0 = get_unaligned_le32(k->kv_val + 0);
	key1 = get_unaligned_le16(k->kv_val + 4);
	key2 = get_unaligned_le32(k->kv_val + 6);
	key3 = get_unaligned_le16(k->kv_val + 10);
	key4 = get_unaligned_le32(k->kv_val + 12);
	if (k->kv_len <= WLAN_KEY_LEN_WEP104)
		key4 &= 0xff;

	/*
	 * Note: Key cache registers access special memory area that requires
	 * two 32-bit writes to actually update the values in the internal
	 * memory. Consequently, the exact order and pairs used here must be
	 * maintained.
	 */

	if (keyType == AR_KEYTABLE_TYPE_TKIP) {
		u16 micentry = entry + 64;

		/*
		 * Write inverted key[47:0] first to avoid Michael MIC errors
		 * on frames that could be sent or received at the same time.
		 * The correct key will be written in the end once everything
		 * else is ready.
		 */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);

		/* Write key[95:48] */
		REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
		REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);

		/* Write key[127:96] and key type */
		REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
		REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);

		/* Write MAC address for the entry */
		(void) ath_hw_keysetmac(common, entry, mac);

		if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
			/*
			 * TKIP uses two key cache entries:
			 * Michael MIC TX/RX keys in the same key cache entry
			 * (idx = main index + 64):
			 * key0 [31:0] = RX key [31:0]
			 * key1 [15:0] = TX key [31:16]
			 * key1 [31:16] = reserved
			 * key2 [31:0] = RX key [63:32]
			 * key3 [15:0] = TX key [15:0]
			 * key3 [31:16] = reserved
			 * key4 [31:0] = TX key [63:32]
			 */
			u32 mic0, mic1, mic2, mic3, mic4;

			mic0 = get_unaligned_le32(k->kv_mic + 0);
			mic2 = get_unaligned_le32(k->kv_mic + 4);
			mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
			mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
			mic4 = get_unaligned_le32(k->kv_txmic + 4);

			ENABLE_REGWRITE_BUFFER(ah);

			/* Write RX[31:0] and TX[31:16] */
			REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
			REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);

			/* Write RX[63:32] and TX[15:0] */
			REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
			REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);

			/* Write TX[63:32] and keyType(reserved) */
			REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
			REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
				  AR_KEYTABLE_TYPE_CLR);

			REGWRITE_BUFFER_FLUSH(ah);

		} else {
			/*
			 * TKIP uses four key cache entries (two for group
			 * keys):
			 * Michael MIC TX/RX keys are in different key cache
			 * entries (idx = main index + 64 for TX and
			 * main index + 32 + 96 for RX):
			 * key0 [31:0] = TX/RX MIC key [31:0]
			 * key1 [31:0] = reserved
			 * key2 [31:0] = TX/RX MIC key [63:32]
			 * key3 [31:0] = reserved
			 * key4 [31:0] = reserved
			 *
			 * Upper layer code will call this function separately
			 * for TX and RX keys when these registers offsets are
			 * used.
			 */
			u32 mic0, mic2;

			mic0 = get_unaligned_le32(k->kv_mic + 0);
			mic2 = get_unaligned_le32(k->kv_mic + 4);

			ENABLE_REGWRITE_BUFFER(ah);

			/* Write MIC key[31:0] */
			REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
			REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);

			/* Write MIC key[63:32] */
			REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
			REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);

			/* Write TX[63:32] and keyType(reserved) */
			REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
			REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
				  AR_KEYTABLE_TYPE_CLR);

			REGWRITE_BUFFER_FLUSH(ah);
		}

		ENABLE_REGWRITE_BUFFER(ah);

		/* MAC address registers are reserved for the MIC entry */
		REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
		REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);

		/*
		 * Write the correct (un-inverted) key[47:0] last to enable
		 * TKIP now that all other registers are set with correct
		 * values.
		 */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);

		REGWRITE_BUFFER_FLUSH(ah);
	} else {
		ENABLE_REGWRITE_BUFFER(ah);

		/* Write key[47:0] */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);

		/* Write key[95:48] */
		REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
		REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);

		/* Write key[127:96] and key type */
		REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
		REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);

		REGWRITE_BUFFER_FLUSH(ah);

		/* Write MAC address for the entry */
		(void) ath_hw_keysetmac(common, entry, mac);
	}

	return true;
}

/*
 * ath_setkey_tkip - install a TKIP key plus its Michael MIC key(s)
 *
 * The MIC sub-keys are pulled out of @key at the nl80211-defined
 * offsets.  Depending on the MIC-combined capability this programs one
 * or two key cache entries; see the inline comments for the layout.
 * Returns non-zero on success (the result of the last keycache write).
 */
static int ath_setkey_tkip(struct ath_common *common, u16 keyix,
			   const u8 *key,
			   struct ath_keyval *hk, const u8 *addr,
			   bool authenticator)
{
	const u8 *key_rxmic;
	const u8 *key_txmic;

	key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
	key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;

	if (addr == NULL) {
		/*
		 * Group key installation - only two key cache entries are used
		 * regardless of splitmic capability since group key is only
		 * used either for TX or RX.
		 */
		if (authenticator) {
			memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
		} else {
			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
		}
		return ath_hw_set_keycache_entry(common, keyix, hk, addr);
	}
	if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
		/* TX and RX keys share the same key cache entry. */
		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
		return ath_hw_set_keycache_entry(common, keyix, hk, addr);
	}

	/* Separate key cache entries for TX and RX */

	/* TX key goes at first index, RX key at +32. */
	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
	if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
		/* TX MIC entry failed. No need to proceed further */
		ath_err(common, "Setting TX MIC Key Failed\n");
		return 0;
	}

	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
	/* XXX delete tx key on failure? */
	return ath_hw_set_keycache_entry(common, keyix + 32, hk, addr);
}

/*
 * Find a key cache slot usable for a TKIP key: the slot itself and its
 * MIC companion(s) at +64 (and +32/+96 on split-MIC hardware) must all
 * be free.  Returns the slot index, or -1 if none is available.
 */
static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
{
	int i;

	for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
		if (test_bit(i, common->keymap) ||
		    test_bit(i + 64, common->keymap))
			continue; /* At least one part of TKIP key allocated */
		if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) &&
		    (test_bit(i + 32, common->keymap) ||
		     test_bit(i + 64 + 32, common->keymap)))
			continue; /* At least one part of TKIP key allocated */

		/* Found a free slot for a TKIP key */
		return i;
	}
	return -1;
}

/*
 * Find a free key cache slot for a non-group key of cipher type @cipher.
 * Prefers slots that could never be used by TKIP (so TKIP allocations
 * are not starved), then partially-used TKIP quads, then any free slot
 * outside the ranges reserved for TKIP group keys.  Returns -1 if full.
 */
static int ath_reserve_key_cache_slot(struct ath_common *common,
				      u32 cipher)
{
	int i;

	if (cipher == WLAN_CIPHER_SUITE_TKIP)
		return ath_reserve_key_cache_slot_tkip(common);

	/* First, try to find slots that would not be available for TKIP. */
	if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
		for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
			if (!test_bit(i, common->keymap) &&
			    (test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i;
			if (!test_bit(i + 32, common->keymap) &&
			    (test_bit(i, common->keymap) ||
			     test_bit(i + 64, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i + 32;
			if (!test_bit(i + 64, common->keymap) &&
			    (test_bit(i , common->keymap) ||
			     test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i + 64;
			if (!test_bit(i + 64 + 32, common->keymap) &&
			    (test_bit(i, common->keymap) ||
			     test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64, common->keymap)))
				return i + 64 + 32;
		}
	} else {
		for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
			if (!test_bit(i, common->keymap) &&
			    test_bit(i + 64, common->keymap))
				return i;
			if (test_bit(i, common->keymap) &&
			    !test_bit(i + 64, common->keymap))
				return i + 64;
		}
	}

	/* No partially used TKIP slots, pick any available slot */
	for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
		/* Do not allow slots that could be needed for TKIP group keys
		 * to be used. This limitation could be removed if we know that
		 * TKIP will not be used. */
		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
			continue;
		if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
				continue;
			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
				continue;
		}
		if (!test_bit(i, common->keymap))
			return i; /* Found a free slot for a key */
	}

	/* No free slot found */
	return -1;
}

/*
 * Configure encryption in the HW.
 *
 * Picks a key cache slot (group keys get a multicast MAC built from the
 * vif/sta address with the group bit set), programs the key, and marks
 * the used slot(s) in the keymap/tkip_keymap/ccmp_keymap bitmaps.
 * Returns the allocated index on success or a negative errno.
 */
int ath_key_config(struct ath_common *common,
			  struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct ath_keyval hk;
	const u8 *mac = NULL;
	u8 gmac[ETH_ALEN];
	int ret = 0;
	int idx;

	memset(&hk, 0, sizeof(hk));

	switch (key->cipher) {
	case 0:
		hk.kv_type = ATH_CIPHER_CLR;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		hk.kv_type = ATH_CIPHER_WEP;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		hk.kv_type = ATH_CIPHER_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		hk.kv_type = ATH_CIPHER_AES_CCM;
		break;
	default:
		return -EOPNOTSUPP;
	}

	hk.kv_len = key->keylen;
	if (key->keylen)
		memcpy(hk.kv_val, key->key, key->keylen);

	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		switch (vif->type) {
		case NL80211_IFTYPE_AP:
			/* Group key: match on our own address + group bit */
			memcpy(gmac, vif->addr, ETH_ALEN);
			gmac[0] |= 0x01;
			mac = gmac;
			idx = ath_reserve_key_cache_slot(common, key->cipher);
			break;
		case NL80211_IFTYPE_ADHOC:
			if (!sta) {
				idx = key->keyidx;
				break;
			}
			memcpy(gmac, sta->addr, ETH_ALEN);
			gmac[0] |= 0x01;
			mac = gmac;
			idx = ath_reserve_key_cache_slot(common, key->cipher);
			break;
		default:
			idx = key->keyidx;
			break;
		}
	} else if (key->keyidx) {
		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		if (vif->type != NL80211_IFTYPE_AP) {
			/* Only keyidx 0 should be used with unicast key, but
			 * allow this for client mode for now. */
			idx = key->keyidx;
		} else
			return -EIO;
	} else {
		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		idx = ath_reserve_key_cache_slot(common, key->cipher);
	}

	if (idx < 0)
		return -ENOSPC; /* no free key cache entries */

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
				      vif->type == NL80211_IFTYPE_AP);
	else
		ret = ath_hw_set_keycache_entry(common, idx, &hk, mac);

	if (!ret)
		return -EIO;

	set_bit(idx, common->keymap);
	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
		set_bit(idx, common->ccmp_keymap);

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		set_bit(idx + 64, common->keymap);
		set_bit(idx, common->tkip_keymap);
		set_bit(idx + 64, common->tkip_keymap);
		if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
			set_bit(idx + 32, common->keymap);
			set_bit(idx + 64 + 32, common->keymap);
			set_bit(idx + 32, common->tkip_keymap);
			set_bit(idx + 64 + 32, common->tkip_keymap);
		}
	}

	return idx;
}
EXPORT_SYMBOL(ath_key_config);

/*
 * Delete Key.
 *
 * Resets the hardware entry and clears every bitmap bit set by
 * ath_key_config(), including the extra TKIP MIC entries.  Slots below
 * IEEE80211_WEP_NKID are never tracked in the bitmaps, so only the
 * hardware reset is done for them.
 */
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
{
	ath_hw_keyreset(common, key->hw_key_idx);
	if (key->hw_key_idx < IEEE80211_WEP_NKID)
		return;

	clear_bit(key->hw_key_idx, common->keymap);
	clear_bit(key->hw_key_idx, common->ccmp_keymap);
	if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
		return;

	clear_bit(key->hw_key_idx + 64, common->keymap);

	clear_bit(key->hw_key_idx, common->tkip_keymap);
	clear_bit(key->hw_key_idx + 64, common->tkip_keymap);

	if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
		ath_hw_keyreset(common, key->hw_key_idx + 32);
		clear_bit(key->hw_key_idx + 32, common->keymap);
		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);

		clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
		clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
	}
}
EXPORT_SYMBOL(ath_key_delete);
gpl-2.0
CyanogenMod/android_kernel_samsung_mondrianwifi
sound/soc/samsung/lowland.c
4900
5682
/*
 * Lowland audio support
 *
 * Copyright 2011 Wolfson Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/jack.h>
#include <linux/gpio.h>
#include <linux/module.h>

#include "../codecs/wm5100.h"
#include "../codecs/wm9081.h"

/* Clock rates derived from a 44.1kHz base rate */
#define MCLK1_RATE (44100 * 512)
#define CLKOUT_RATE (44100 * 256)

/*
 * Set both the codec and CPU DAIs to I2S, normal clock polarity, with
 * the codec as bit/frame clock master.
 */
static int lowland_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret;

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	return 0;
}

static struct snd_soc_ops lowland_ops = {
	.hw_params = lowland_hw_params,
};

static struct snd_soc_jack lowland_headset;

/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin lowland_headset_pins[] = {
	{
		.pin = "Headphone",
		.mask = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
	},
	{
		.pin = "Headset Mic",
		.mask = SND_JACK_MICROPHONE,
	},
};

/*
 * One-time WM5100 setup: source SYSCLK from MCLK1, enable OPCLK for the
 * other audio components, and hook up headset jack detection.
 */
static int lowland_wm5100_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	int ret;

	ret = snd_soc_codec_set_sysclk(codec, WM5100_CLK_SYSCLK,
				       WM5100_CLKSRC_MCLK1, MCLK1_RATE,
				       SND_SOC_CLOCK_IN);
	if (ret < 0) {
		pr_err("Failed to set SYSCLK clock source: %d\n", ret);
		return ret;
	}

	/* Clock OPCLK, used by the other audio components. */
	ret = snd_soc_codec_set_sysclk(codec, WM5100_CLK_OPCLK, 0,
				       CLKOUT_RATE, 0);
	if (ret < 0) {
		pr_err("Failed to set OPCLK rate: %d\n", ret);
		return ret;
	}

	ret = snd_soc_jack_new(codec, "Headset",
			       SND_JACK_LINEOUT | SND_JACK_HEADSET |
			       SND_JACK_BTN_0, &lowland_headset);
	if (ret)
		return ret;

	ret = snd_soc_jack_add_pins(&lowland_headset,
				    ARRAY_SIZE(lowland_headset_pins),
				    lowland_headset_pins);
	if (ret)
		return ret;

	/* Hand the jack to the codec's accessory detection */
	wm5100_detect(codec, &lowland_headset);

	return 0;
}

static struct snd_soc_dai_link lowland_dai[] = {
	{
		.name = "CPU",
		.stream_name = "CPU",
		.cpu_dai_name = "samsung-i2s.0",
		.codec_dai_name = "wm5100-aif1",
		.platform_name = "samsung-audio",
		.codec_name = "wm5100.1-001a",
		.ops = &lowland_ops,
		.init = lowland_wm5100_init,
	},
	{
		.name = "Baseband",
		.stream_name = "Baseband",
		.cpu_dai_name = "wm5100-aif2",
		.codec_dai_name = "wm1250-ev1",
		.codec_name = "wm1250-ev1.1-0027",
		.ops = &lowland_ops,
		.ignore_suspend = 1,
	},
};

/* Aux init for the WM9081 speaker amp: unused pin + fixed clock */
static int lowland_wm9081_init(struct snd_soc_dapm_context *dapm)
{
	snd_soc_dapm_nc_pin(dapm, "LINEOUT");

	/* At any time the WM9081 is active it will have this clock */
	return snd_soc_codec_set_sysclk(dapm->codec, WM9081_SYSCLK_MCLK, 0,
					CLKOUT_RATE, 0);
}

static struct snd_soc_aux_dev lowland_aux_dev[] = {
	{
		.name = "wm9081",
		.codec_name = "wm9081.1-006c",
		.init = lowland_wm9081_init,
	},
};

/* Prefix the WM9081's controls with "Sub" to avoid name clashes */
static struct snd_soc_codec_conf lowland_codec_conf[] = {
	{
		.dev_name = "wm9081.1-006c",
		.name_prefix = "Sub",
	},
};

static const struct snd_kcontrol_new controls[] = {
	SOC_DAPM_PIN_SWITCH("Main Speaker"),
	SOC_DAPM_PIN_SWITCH("Main DMIC"),
	SOC_DAPM_PIN_SWITCH("Main AMIC"),
	SOC_DAPM_PIN_SWITCH("WM1250 Input"),
	SOC_DAPM_PIN_SWITCH("WM1250 Output"),
	SOC_DAPM_PIN_SWITCH("Headphone"),
};

static struct snd_soc_dapm_widget widgets[] = {
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),

	SND_SOC_DAPM_SPK("Main Speaker", NULL),

	SND_SOC_DAPM_MIC("Main AMIC", NULL),
	SND_SOC_DAPM_MIC("Main DMIC", NULL),
};

/* Board-level routes: headphone outputs feed the sub amp & speaker */
static struct snd_soc_dapm_route audio_paths[] = {
	{ "Sub IN1", NULL, "HPOUT2L" },
	{ "Sub IN2", NULL, "HPOUT2R" },

	{ "Main Speaker", NULL, "Sub SPKN" },
	{ "Main Speaker", NULL, "Sub SPKP" },
	{ "Main Speaker", NULL, "SPKDAT1" },
};

static struct snd_soc_card lowland = {
	.name = "Lowland",
	.owner = THIS_MODULE,
	.dai_link = lowland_dai,
	.num_links = ARRAY_SIZE(lowland_dai),
	.aux_dev = lowland_aux_dev,
	.num_aux_devs = ARRAY_SIZE(lowland_aux_dev),
	.codec_conf = lowland_codec_conf,
	.num_configs = ARRAY_SIZE(lowland_codec_conf),

	.controls = controls,
	.num_controls = ARRAY_SIZE(controls),
	.dapm_widgets = widgets,
	.num_dapm_widgets = ARRAY_SIZE(widgets),
	.dapm_routes = audio_paths,
	.num_dapm_routes = ARRAY_SIZE(audio_paths),
};

/* Register the sound card against this platform device */
static __devinit int lowland_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &lowland;
	int ret;

	card->dev = &pdev->dev;

	ret = snd_soc_register_card(card);
	if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static int __devexit lowland_remove(struct platform_device *pdev)
{
	/* drvdata was set to the card by snd_soc_register_card() */
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	snd_soc_unregister_card(card);

	return 0;
}

static struct platform_driver lowland_driver = {
	.driver = {
		.name = "lowland",
		.owner = THIS_MODULE,
		.pm = &snd_soc_pm_ops,
	},
	.probe = lowland_probe,
	.remove = __devexit_p(lowland_remove),
};

module_platform_driver(lowland_driver);

MODULE_DESCRIPTION("Lowland audio support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lowland");
gpl-2.0
mlehtima/android_kernel_semc_msm7x30
drivers/isdn/capi/kcapi.c
5156
30391
/* $Id: kcapi.c,v 1.1.2.8 2004/03/26 19:57:20 armin Exp $ * * Kernel CAPI 2.0 Module * * Copyright 1999 by Carsten Paeth <calle@calle.de> * Copyright 2002 by Kai Germaschewski <kai@germaschewski.name> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #define AVMB1_COMPAT #include "kcapi.h" #include <linux/module.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/capi.h> #include <linux/kernelcapi.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> #ifdef AVMB1_COMPAT #include <linux/b1lli.h> #endif #include <linux/mutex.h> #include <linux/rcupdate.h> static int showcapimsgs = 0; static struct workqueue_struct *kcapi_wq; MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); module_param(showcapimsgs, uint, 0); /* ------------------------------------------------------------- */ struct capictr_event { struct work_struct work; unsigned int type; u32 controller; }; /* ------------------------------------------------------------- */ static struct capi_version driver_version = {2, 0, 1, 1 << 4}; static char driver_serial[CAPI_SERIAL_LEN] = "0004711"; static char capi_manufakturer[64] = "AVM Berlin"; #define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f) LIST_HEAD(capi_drivers); DEFINE_MUTEX(capi_drivers_lock); struct capi_ctr *capi_controller[CAPI_MAXCONTR]; DEFINE_MUTEX(capi_controller_lock); struct capi20_appl *capi_applications[CAPI_MAXAPPL]; static int ncontrollers; static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list); /* -------- controller ref counting -------------------------------------- */ static inline 
struct capi_ctr * capi_ctr_get(struct capi_ctr *ctr) { if (!try_module_get(ctr->owner)) return NULL; return ctr; } static inline void capi_ctr_put(struct capi_ctr *ctr) { module_put(ctr->owner); } /* ------------------------------------------------------------- */ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) { if (contr - 1 >= CAPI_MAXCONTR) return NULL; return capi_controller[contr - 1]; } static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) { lockdep_assert_held(&capi_controller_lock); if (applid - 1 >= CAPI_MAXAPPL) return NULL; return capi_applications[applid - 1]; } static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) { if (applid - 1 >= CAPI_MAXAPPL) return NULL; return rcu_dereference(capi_applications[applid - 1]); } /* -------- util functions ------------------------------------ */ static inline int capi_cmd_valid(u8 cmd) { switch (cmd) { case CAPI_ALERT: case CAPI_CONNECT: case CAPI_CONNECT_ACTIVE: case CAPI_CONNECT_B3_ACTIVE: case CAPI_CONNECT_B3: case CAPI_CONNECT_B3_T90_ACTIVE: case CAPI_DATA_B3: case CAPI_DISCONNECT_B3: case CAPI_DISCONNECT: case CAPI_FACILITY: case CAPI_INFO: case CAPI_LISTEN: case CAPI_MANUFACTURER: case CAPI_RESET_B3: case CAPI_SELECT_B_PROTOCOL: return 1; } return 0; } static inline int capi_subcmd_valid(u8 subcmd) { switch (subcmd) { case CAPI_REQ: case CAPI_CONF: case CAPI_IND: case CAPI_RESP: return 1; } return 0; } /* ------------------------------------------------------------ */ static void register_appl(struct capi_ctr *ctr, u16 applid, capi_register_params *rparam) { ctr = capi_ctr_get(ctr); if (ctr) ctr->register_appl(ctr, applid, rparam); else printk(KERN_WARNING "%s: cannot get controller resources\n", __func__); } static void release_appl(struct capi_ctr *ctr, u16 applid) { DBG("applid %#x", applid); ctr->release_appl(ctr, applid); capi_ctr_put(ctr); } static void notify_up(u32 contr) { struct capi20_appl *ap; struct capi_ctr *ctr; u16 applid; 
mutex_lock(&capi_controller_lock); if (showcapimsgs & 1) printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr); ctr = get_capi_ctr_by_nr(contr); if (ctr) { if (ctr->state == CAPI_CTR_RUNNING) goto unlock_out; ctr->state = CAPI_CTR_RUNNING; for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { ap = __get_capi_appl_by_nr(applid); if (ap) register_appl(ctr, applid, &ap->rparam); } wake_up_interruptible_all(&ctr->state_wait_queue); } else printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr); unlock_out: mutex_unlock(&capi_controller_lock); } static void ctr_down(struct capi_ctr *ctr, int new_state) { struct capi20_appl *ap; u16 applid; if (ctr->state == CAPI_CTR_DETECTED || ctr->state == CAPI_CTR_DETACHED) return; ctr->state = new_state; memset(ctr->manu, 0, sizeof(ctr->manu)); memset(&ctr->version, 0, sizeof(ctr->version)); memset(&ctr->profile, 0, sizeof(ctr->profile)); memset(ctr->serial, 0, sizeof(ctr->serial)); for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { ap = __get_capi_appl_by_nr(applid); if (ap) capi_ctr_put(ctr); } wake_up_interruptible_all(&ctr->state_wait_queue); } static void notify_down(u32 contr) { struct capi_ctr *ctr; mutex_lock(&capi_controller_lock); if (showcapimsgs & 1) printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr); ctr = get_capi_ctr_by_nr(contr); if (ctr) ctr_down(ctr, CAPI_CTR_DETECTED); else printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr); mutex_unlock(&capi_controller_lock); } static int notify_handler(struct notifier_block *nb, unsigned long val, void *v) { u32 contr = (long)v; switch (val) { case CAPICTR_UP: notify_up(contr); break; case CAPICTR_DOWN: notify_down(contr); break; } return NOTIFY_OK; } static void do_notify_work(struct work_struct *work) { struct capictr_event *event = container_of(work, struct capictr_event, work); blocking_notifier_call_chain(&ctr_notifier_list, event->type, (void *)(long)event->controller); kfree(event); } /* * The notifier will result in adding/deleteing of 
devices. Devices can
 * only be removed in user process context, not in bh context, which is
 * why controller state changes are delivered through a workqueue.
 */

/*
 * Queue a controller up/down event for deferred delivery through the
 * ctr_notifier_list notifier chain.  Safe to call from atomic context
 * (allocation uses GFP_ATOMIC).  Returns 0 on success or -ENOMEM.
 */
static int notify_push(unsigned int event_type, u32 controller)
{
	struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);

	if (!event)
		return -ENOMEM;

	INIT_WORK(&event->work, do_notify_work);
	event->type = event_type;
	event->controller = controller;

	/* do_notify_work() kfree()s the event after delivering it */
	queue_work(kcapi_wq, &event->work);
	return 0;
}

int register_capictr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&ctr_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_capictr_notifier);

int unregister_capictr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_capictr_notifier);

/* -------- Receiver ------------------------------------------ */

/*
 * Workqueue handler: drain one application's receive queue and hand each
 * message to the application's recv_message() callback.  recv_mtx
 * serializes delivery per application.
 */
static void recv_handler(struct work_struct *work)
{
	struct sk_buff *skb;
	struct capi20_appl *ap =
		container_of(work, struct capi20_appl, recv_work);

	/* stop delivering once the application is being released */
	if ((!ap) || (ap->release_in_progress))
		return;

	mutex_lock(&ap->recv_mtx);
	while ((skb = skb_dequeue(&ap->recv_queue))) {
		/* per-application statistics: data vs. control packets */
		if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
			ap->nrecvdatapkt++;
		else
			ap->nrecvctlpkt++;

		ap->recv_message(ap, skb);
	}
	mutex_unlock(&ap->recv_mtx);
}

/**
 * capi_ctr_handle_message() - handle incoming CAPI message
 * @ctr:	controller descriptor structure.
 * @appl:	application ID.
 * @skb:	message.
 *
 * Called by hardware driver to pass a CAPI message to the application.
*/ void capi_ctr_handle_message(struct capi_ctr *ctr, u16 appl, struct sk_buff *skb) { struct capi20_appl *ap; int showctl = 0; u8 cmd, subcmd; _cdebbuf *cdb; if (ctr->state != CAPI_CTR_RUNNING) { cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s", ctr->cnr, cdb->buf); cdebbuf_free(cdb); } else printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n", ctr->cnr); goto error; } cmd = CAPIMSG_COMMAND(skb->data); subcmd = CAPIMSG_SUBCOMMAND(skb->data); if (cmd == CAPI_DATA_B3 && subcmd == CAPI_IND) { ctr->nrecvdatapkt++; if (ctr->traceflag > 2) showctl |= 2; } else { ctr->nrecvctlpkt++; if (ctr->traceflag) showctl |= 2; } showctl |= (ctr->traceflag & 1); if (showctl & 2) { if (showctl & 1) { printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n", ctr->cnr, CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } else { cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_DEBUG "kcapi: got [%03d] %s\n", ctr->cnr, cdb->buf); cdebbuf_free(cdb); } else printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n", ctr->cnr, CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } } rcu_read_lock(); ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data)); if (!ap) { rcu_read_unlock(); cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n", CAPIMSG_APPID(skb->data), cdb->buf); cdebbuf_free(cdb); } else printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s) cannot trace\n", CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd)); goto error; } skb_queue_tail(&ap->recv_queue, skb); queue_work(kcapi_wq, &ap->recv_work); rcu_read_unlock(); return; error: kfree_skb(skb); } EXPORT_SYMBOL(capi_ctr_handle_message); /** * capi_ctr_ready() - signal CAPI controller ready * @ctr: controller descriptor structure. 
*
 * Called by hardware driver to signal that the controller is up and running.
 */

void capi_ctr_ready(struct capi_ctr *ctr)
{
	printk(KERN_NOTICE "kcapi: controller [%03d] \"%s\" ready.\n",
	       ctr->cnr, ctr->name);

	/* actual state change is performed deferred via the notifier chain */
	notify_push(CAPICTR_UP, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_ready);

/**
 * capi_ctr_down() - signal CAPI controller not ready
 * @ctr:	controller descriptor structure.
 *
 * Called by hardware driver to signal that the controller is down and
 * unavailable for use.
 */

void capi_ctr_down(struct capi_ctr *ctr)
{
	printk(KERN_NOTICE "kcapi: controller [%03d] down.\n", ctr->cnr);

	/* deferred, same mechanism as capi_ctr_ready() */
	notify_push(CAPICTR_DOWN, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_down);

/**
 * capi_ctr_suspend_output() - suspend controller
 * @ctr:	controller descriptor structure.
 *
 * Called by hardware driver to stop data flow.
 *
 * Note: The caller is responsible for synchronizing concurrent state changes
 * as well as invocations of capi_ctr_handle_message.
 */

void capi_ctr_suspend_output(struct capi_ctr *ctr)
{
	/* only log on an actual transition */
	if (!ctr->blocked) {
		printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
		       ctr->cnr);
		ctr->blocked = 1;
	}
}
EXPORT_SYMBOL(capi_ctr_suspend_output);

/**
 * capi_ctr_resume_output() - resume controller
 * @ctr:	controller descriptor structure.
 *
 * Called by hardware driver to resume data flow.
 *
 * Note: The caller is responsible for synchronizing concurrent state changes
 * as well as invocations of capi_ctr_handle_message.
 */

void capi_ctr_resume_output(struct capi_ctr *ctr)
{
	/* only log on an actual transition */
	if (ctr->blocked) {
		printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
		       ctr->cnr);
		ctr->blocked = 0;
	}
}
EXPORT_SYMBOL(capi_ctr_resume_output);

/* ------------------------------------------------------------- */

/**
 * attach_capi_ctr() - register CAPI controller
 * @ctr:	controller descriptor structure.
 *
 * Called by hardware driver to register a controller with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error */ int attach_capi_ctr(struct capi_ctr *ctr) { int i; mutex_lock(&capi_controller_lock); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_controller[i]) break; } if (i == CAPI_MAXCONTR) { mutex_unlock(&capi_controller_lock); printk(KERN_ERR "kcapi: out of controller slots\n"); return -EBUSY; } capi_controller[i] = ctr; ctr->nrecvctlpkt = 0; ctr->nrecvdatapkt = 0; ctr->nsentctlpkt = 0; ctr->nsentdatapkt = 0; ctr->cnr = i + 1; ctr->state = CAPI_CTR_DETECTED; ctr->blocked = 0; ctr->traceflag = showcapimsgs; init_waitqueue_head(&ctr->state_wait_queue); sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr); ctr->procent = proc_create_data(ctr->procfn, 0, NULL, ctr->proc_fops, ctr); ncontrollers++; mutex_unlock(&capi_controller_lock); printk(KERN_NOTICE "kcapi: controller [%03d]: %s attached\n", ctr->cnr, ctr->name); return 0; } EXPORT_SYMBOL(attach_capi_ctr); /** * detach_capi_ctr() - unregister CAPI controller * @ctr: controller descriptor structure. * * Called by hardware driver to remove the registration of a controller * with the CAPI subsystem. * Return value: 0 on success, error code < 0 on error */ int detach_capi_ctr(struct capi_ctr *ctr) { int err = 0; mutex_lock(&capi_controller_lock); ctr_down(ctr, CAPI_CTR_DETACHED); if (capi_controller[ctr->cnr - 1] != ctr) { err = -EINVAL; goto unlock_out; } capi_controller[ctr->cnr - 1] = NULL; ncontrollers--; if (ctr->procent) remove_proc_entry(ctr->procfn, NULL); printk(KERN_NOTICE "kcapi: controller [%03d]: %s unregistered\n", ctr->cnr, ctr->name); unlock_out: mutex_unlock(&capi_controller_lock); return err; } EXPORT_SYMBOL(detach_capi_ctr); /** * register_capi_driver() - register CAPI driver * @driver: driver descriptor structure. * * Called by hardware driver to register itself with the CAPI subsystem. 
*/

void register_capi_driver(struct capi_driver *driver)
{
	/* capi_drivers_lock guards all add/del/iterate on capi_drivers */
	mutex_lock(&capi_drivers_lock);
	list_add_tail(&driver->list, &capi_drivers);
	mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(register_capi_driver);

/**
 * unregister_capi_driver() - unregister CAPI driver
 * @driver:	driver descriptor structure.
 *
 * Called by hardware driver to unregister itself from the CAPI subsystem.
 */

void unregister_capi_driver(struct capi_driver *driver)
{
	mutex_lock(&capi_drivers_lock);
	list_del(&driver->list);
	mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(unregister_capi_driver);

/* ------------------------------------------------------------- */
/* -------- CAPI2.0 Interface ---------------------------------- */
/* ------------------------------------------------------------- */

/**
 * capi20_isinstalled() - CAPI 2.0 operation CAPI_INSTALLED
 *
 * Return value: CAPI result code (CAPI_NOERROR if at least one ISDN controller
 * is ready for use, CAPI_REGNOTINSTALLED otherwise)
 */

u16 capi20_isinstalled(void)
{
	u16 ret = CAPI_REGNOTINSTALLED;
	int i;

	mutex_lock(&capi_controller_lock);

	/* a single RUNNING controller makes CAPI "installed" */
	for (i = 0; i < CAPI_MAXCONTR; i++)
		if (capi_controller[i] &&
		    capi_controller[i]->state == CAPI_CTR_RUNNING) {
			ret = CAPI_NOERROR;
			break;
		}

	mutex_unlock(&capi_controller_lock);
	return ret;
}
EXPORT_SYMBOL(capi20_isinstalled);

/**
 * capi20_register() - CAPI 2.0 operation CAPI_REGISTER
 * @ap:	CAPI application descriptor structure.
 *
 * Register an application's presence with CAPI.
 * A unique application ID is assigned and stored in @ap->applid.
 * After this function returns successfully, the message receive
 * callback function @ap->recv_message() may be called at any time
 * until capi20_release() has been called for the same @ap.
* Return value: CAPI result code */ u16 capi20_register(struct capi20_appl *ap) { int i; u16 applid; DBG(""); if (ap->rparam.datablklen < 128) return CAPI_LOGBLKSIZETOSMALL; ap->nrecvctlpkt = 0; ap->nrecvdatapkt = 0; ap->nsentctlpkt = 0; ap->nsentdatapkt = 0; mutex_init(&ap->recv_mtx); skb_queue_head_init(&ap->recv_queue); INIT_WORK(&ap->recv_work, recv_handler); ap->release_in_progress = 0; mutex_lock(&capi_controller_lock); for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { if (capi_applications[applid - 1] == NULL) break; } if (applid > CAPI_MAXAPPL) { mutex_unlock(&capi_controller_lock); return CAPI_TOOMANYAPPLS; } ap->applid = applid; capi_applications[applid - 1] = ap; for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_controller[i] || capi_controller[i]->state != CAPI_CTR_RUNNING) continue; register_appl(capi_controller[i], applid, &ap->rparam); } mutex_unlock(&capi_controller_lock); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d up\n", applid); } return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_register); /** * capi20_release() - CAPI 2.0 operation CAPI_RELEASE * @ap: CAPI application descriptor structure. * * Terminate an application's registration with CAPI. * After this function returns successfully, the message receive * callback function @ap->recv_message() will no longer be called. 
* Return value: CAPI result code */ u16 capi20_release(struct capi20_appl *ap) { int i; DBG("applid %#x", ap->applid); mutex_lock(&capi_controller_lock); ap->release_in_progress = 1; capi_applications[ap->applid - 1] = NULL; synchronize_rcu(); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_controller[i] || capi_controller[i]->state != CAPI_CTR_RUNNING) continue; release_appl(capi_controller[i], ap->applid); } mutex_unlock(&capi_controller_lock); flush_workqueue(kcapi_wq); skb_queue_purge(&ap->recv_queue); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d down\n", ap->applid); } return CAPI_NOERROR; } EXPORT_SYMBOL(capi20_release); /** * capi20_put_message() - CAPI 2.0 operation CAPI_PUT_MESSAGE * @ap: CAPI application descriptor structure. * @skb: CAPI message. * * Transfer a single message to CAPI. * Return value: CAPI result code */ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb) { struct capi_ctr *ctr; int showctl = 0; u8 cmd, subcmd; DBG("applid %#x", ap->applid); if (ncontrollers == 0) return CAPI_REGNOTINSTALLED; if ((ap->applid == 0) || ap->release_in_progress) return CAPI_ILLAPPNR; if (skb->len < 12 || !capi_cmd_valid(CAPIMSG_COMMAND(skb->data)) || !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data))) return CAPI_ILLCMDORSUBCMDORMSGTOSMALL; /* * The controller reference is protected by the existence of the * application passed to us. We assume that the caller properly * synchronizes this service with capi20_release. 
*/ ctr = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data)); if (!ctr || ctr->state != CAPI_CTR_RUNNING) return CAPI_REGNOTINSTALLED; if (ctr->blocked) return CAPI_SENDQUEUEFULL; cmd = CAPIMSG_COMMAND(skb->data); subcmd = CAPIMSG_SUBCOMMAND(skb->data); if (cmd == CAPI_DATA_B3 && subcmd == CAPI_REQ) { ctr->nsentdatapkt++; ap->nsentdatapkt++; if (ctr->traceflag > 2) showctl |= 2; } else { ctr->nsentctlpkt++; ap->nsentctlpkt++; if (ctr->traceflag) showctl |= 2; } showctl |= (ctr->traceflag & 1); if (showctl & 2) { if (showctl & 1) { printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n", CAPIMSG_CONTROLLER(skb->data), CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } else { _cdebbuf *cdb = capi_message2str(skb->data); if (cdb) { printk(KERN_DEBUG "kcapi: put [%03d] %s\n", CAPIMSG_CONTROLLER(skb->data), cdb->buf); cdebbuf_free(cdb); } else printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u cannot trace\n", CAPIMSG_CONTROLLER(skb->data), CAPIMSG_APPID(skb->data), capi_cmd2str(cmd, subcmd), CAPIMSG_LEN(skb->data)); } } return ctr->send_message(ctr, skb); } EXPORT_SYMBOL(capi20_put_message); /** * capi20_get_manufacturer() - CAPI 2.0 operation CAPI_GET_MANUFACTURER * @contr: controller number. * @buf: result buffer (64 bytes). * * Retrieve information about the manufacturer of the specified ISDN controller * or (for @contr == 0) the driver itself. 
* Return value: CAPI result code */ u16 capi20_get_manufacturer(u32 contr, u8 *buf) { struct capi_ctr *ctr; u16 ret; if (contr == 0) { strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN); return CAPI_NOERROR; } mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; mutex_unlock(&capi_controller_lock); return ret; } EXPORT_SYMBOL(capi20_get_manufacturer); /** * capi20_get_version() - CAPI 2.0 operation CAPI_GET_VERSION * @contr: controller number. * @verp: result structure. * * Retrieve version information for the specified ISDN controller * or (for @contr == 0) the driver itself. * Return value: CAPI result code */ u16 capi20_get_version(u32 contr, struct capi_version *verp) { struct capi_ctr *ctr; u16 ret; if (contr == 0) { *verp = driver_version; return CAPI_NOERROR; } mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { memcpy(verp, &ctr->version, sizeof(capi_version)); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; mutex_unlock(&capi_controller_lock); return ret; } EXPORT_SYMBOL(capi20_get_version); /** * capi20_get_serial() - CAPI 2.0 operation CAPI_GET_SERIAL_NUMBER * @contr: controller number. * @serial: result buffer (8 bytes). * * Retrieve the serial number of the specified ISDN controller * or (for @contr == 0) the driver itself. 
* Return value: CAPI result code */ u16 capi20_get_serial(u32 contr, u8 *serial) { struct capi_ctr *ctr; u16 ret; if (contr == 0) { strlcpy(serial, driver_serial, CAPI_SERIAL_LEN); return CAPI_NOERROR; } mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; mutex_unlock(&capi_controller_lock); return ret; } EXPORT_SYMBOL(capi20_get_serial); /** * capi20_get_profile() - CAPI 2.0 operation CAPI_GET_PROFILE * @contr: controller number. * @profp: result structure. * * Retrieve capability information for the specified ISDN controller * or (for @contr == 0) the number of installed controllers. * Return value: CAPI result code */ u16 capi20_get_profile(u32 contr, struct capi_profile *profp) { struct capi_ctr *ctr; u16 ret; if (contr == 0) { profp->ncontroller = ncontrollers; return CAPI_NOERROR; } mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(contr); if (ctr && ctr->state == CAPI_CTR_RUNNING) { memcpy(profp, &ctr->profile, sizeof(struct capi_profile)); ret = CAPI_NOERROR; } else ret = CAPI_REGNOTINSTALLED; mutex_unlock(&capi_controller_lock); return ret; } EXPORT_SYMBOL(capi20_get_profile); /* Must be called with capi_controller_lock held. 
*/ static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state) { DEFINE_WAIT(wait); int retval = 0; ctr = capi_ctr_get(ctr); if (!ctr) return -ESRCH; for (;;) { prepare_to_wait(&ctr->state_wait_queue, &wait, TASK_INTERRUPTIBLE); if (ctr->state == state) break; if (ctr->state == CAPI_CTR_DETACHED) { retval = -ESRCH; break; } if (signal_pending(current)) { retval = -EINTR; break; } mutex_unlock(&capi_controller_lock); schedule(); mutex_lock(&capi_controller_lock); } finish_wait(&ctr->state_wait_queue, &wait); capi_ctr_put(ctr); return retval; } #ifdef AVMB1_COMPAT static int old_capi_manufacturer(unsigned int cmd, void __user *data) { avmb1_loadandconfigdef ldef; avmb1_extcarddef cdef; avmb1_resetdef rdef; capicardparams cparams; struct capi_ctr *ctr; struct capi_driver *driver = NULL; capiloaddata ldata; struct list_head *l; int retval; switch (cmd) { case AVMB1_ADDCARD: case AVMB1_ADDCARD_WITH_TYPE: if (cmd == AVMB1_ADDCARD) { if ((retval = copy_from_user(&cdef, data, sizeof(avmb1_carddef)))) return -EFAULT; cdef.cardtype = AVM_CARDTYPE_B1; } else { if ((retval = copy_from_user(&cdef, data, sizeof(avmb1_extcarddef)))) return -EFAULT; } cparams.port = cdef.port; cparams.irq = cdef.irq; cparams.cardnr = cdef.cardnr; mutex_lock(&capi_drivers_lock); switch (cdef.cardtype) { case AVM_CARDTYPE_B1: list_for_each(l, &capi_drivers) { driver = list_entry(l, struct capi_driver, list); if (strcmp(driver->name, "b1isa") == 0) break; } break; case AVM_CARDTYPE_T1: list_for_each(l, &capi_drivers) { driver = list_entry(l, struct capi_driver, list); if (strcmp(driver->name, "t1isa") == 0) break; } break; default: driver = NULL; break; } if (!driver) { printk(KERN_ERR "kcapi: driver not loaded.\n"); retval = -EIO; } else if (!driver->add_card) { printk(KERN_ERR "kcapi: driver has no add card function.\n"); retval = -EIO; } else retval = driver->add_card(driver, &cparams); mutex_unlock(&capi_drivers_lock); return retval; case AVMB1_LOAD: case AVMB1_LOAD_AND_CONFIG: if (cmd 
== AVMB1_LOAD) { if (copy_from_user(&ldef, data, sizeof(avmb1_loaddef))) return -EFAULT; ldef.t4config.len = 0; ldef.t4config.data = NULL; } else { if (copy_from_user(&ldef, data, sizeof(avmb1_loadandconfigdef))) return -EFAULT; } mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(ldef.contr); if (!ctr) { retval = -EINVAL; goto load_unlock_out; } if (ctr->load_firmware == NULL) { printk(KERN_DEBUG "kcapi: load: no load function\n"); retval = -ESRCH; goto load_unlock_out; } if (ldef.t4file.len <= 0) { printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len); retval = -EINVAL; goto load_unlock_out; } if (ldef.t4file.data == NULL) { printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n"); retval = -EINVAL; goto load_unlock_out; } ldata.firmware.user = 1; ldata.firmware.data = ldef.t4file.data; ldata.firmware.len = ldef.t4file.len; ldata.configuration.user = 1; ldata.configuration.data = ldef.t4config.data; ldata.configuration.len = ldef.t4config.len; if (ctr->state != CAPI_CTR_DETECTED) { printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr); retval = -EBUSY; goto load_unlock_out; } ctr->state = CAPI_CTR_LOADING; retval = ctr->load_firmware(ctr, &ldata); if (retval) { ctr->state = CAPI_CTR_DETECTED; goto load_unlock_out; } retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING); load_unlock_out: mutex_unlock(&capi_controller_lock); return retval; case AVMB1_RESETCARD: if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef))) return -EFAULT; retval = 0; mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(rdef.contr); if (!ctr) { retval = -ESRCH; goto reset_unlock_out; } if (ctr->state == CAPI_CTR_DETECTED) goto reset_unlock_out; if (ctr->reset_ctr == NULL) { printk(KERN_DEBUG "kcapi: reset: no reset function\n"); retval = -ESRCH; goto reset_unlock_out; } ctr->reset_ctr(ctr); retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED); reset_unlock_out: mutex_unlock(&capi_controller_lock); return 
retval; } return -EINVAL; } #endif /** * capi20_manufacturer() - CAPI 2.0 operation CAPI_MANUFACTURER * @cmd: command. * @data: parameter. * * Perform manufacturer specific command. * Return value: CAPI result code */ int capi20_manufacturer(unsigned int cmd, void __user *data) { struct capi_ctr *ctr; int retval; switch (cmd) { #ifdef AVMB1_COMPAT case AVMB1_LOAD: case AVMB1_LOAD_AND_CONFIG: case AVMB1_RESETCARD: case AVMB1_GET_CARDINFO: case AVMB1_REMOVECARD: return old_capi_manufacturer(cmd, data); #endif case KCAPI_CMD_TRACE: { kcapi_flagdef fdef; if (copy_from_user(&fdef, data, sizeof(kcapi_flagdef))) return -EFAULT; mutex_lock(&capi_controller_lock); ctr = get_capi_ctr_by_nr(fdef.contr); if (ctr) { ctr->traceflag = fdef.flag; printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n", ctr->cnr, ctr->traceflag); retval = 0; } else retval = -ESRCH; mutex_unlock(&capi_controller_lock); return retval; } case KCAPI_CMD_ADDCARD: { struct list_head *l; struct capi_driver *driver = NULL; capicardparams cparams; kcapi_carddef cdef; if ((retval = copy_from_user(&cdef, data, sizeof(cdef)))) return -EFAULT; cparams.port = cdef.port; cparams.irq = cdef.irq; cparams.membase = cdef.membase; cparams.cardnr = cdef.cardnr; cparams.cardtype = 0; cdef.driver[sizeof(cdef.driver) - 1] = 0; mutex_lock(&capi_drivers_lock); list_for_each(l, &capi_drivers) { driver = list_entry(l, struct capi_driver, list); if (strcmp(driver->name, cdef.driver) == 0) break; } if (driver == NULL) { printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n", cdef.driver); retval = -ESRCH; } else if (!driver->add_card) { printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver); retval = -EIO; } else retval = driver->add_card(driver, &cparams); mutex_unlock(&capi_drivers_lock); return retval; } default: printk(KERN_ERR "kcapi: manufacturer command %d unknown.\n", cmd); break; } return -EINVAL; } EXPORT_SYMBOL(capi20_manufacturer); /* 
------------------------------------------------------------- */ /* -------- Init & Cleanup ------------------------------------- */ /* ------------------------------------------------------------- */ /* * init / exit functions */ static struct notifier_block capictr_nb = { .notifier_call = notify_handler, .priority = INT_MAX, }; static int __init kcapi_init(void) { int err; kcapi_wq = alloc_workqueue("kcapi", 0, 0); if (!kcapi_wq) return -ENOMEM; register_capictr_notifier(&capictr_nb); err = cdebug_init(); if (err) { unregister_capictr_notifier(&capictr_nb); destroy_workqueue(kcapi_wq); return err; } kcapi_proc_init(); return 0; } static void __exit kcapi_exit(void) { kcapi_proc_exit(); unregister_capictr_notifier(&capictr_nb); cdebug_exit(); destroy_workqueue(kcapi_wq); } module_init(kcapi_init); module_exit(kcapi_exit);
gpl-2.0
itsmerajit/Samsung_j2
arch/x86/platform/geode/alix.c
7204
4770
/* * System Specific setup for PCEngines ALIX. * At the moment this means setup of GPIO control of LEDs * on Alix.2/3/6 boards. * * * Copyright (C) 2008 Constantin Baranov <const@mimas.ru> * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com> * and Philip Prindeville <philipp@redfish-solutions.com> * * TODO: There are large similarities with leds-net5501.c * by Alessandro Zummo <a.zummo@towertech.it> * In the future leds-net5501.c should be migrated over to platform * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/string.h> #include <linux/module.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/dmi.h> #include <asm/geode.h> #define BIOS_SIGNATURE_TINYBIOS 0xf0000 #define BIOS_SIGNATURE_COREBOOT 0x500 #define BIOS_REGION_SIZE 0x10000 static bool force = 0; module_param(force, bool, 0444); /* FIXME: Award bios is not automatically detected as Alix platform */ MODULE_PARM_DESC(force, "Force detection as ALIX.2/ALIX.3 platform"); static struct gpio_keys_button alix_gpio_buttons[] = { { .code = KEY_RESTART, .gpio = 24, .active_low = 1, .desc = "Reset button", .type = EV_KEY, .wakeup = 0, .debounce_interval = 100, .can_disable = 0, } }; static struct gpio_keys_platform_data alix_buttons_data = { .buttons = alix_gpio_buttons, .nbuttons = ARRAY_SIZE(alix_gpio_buttons), .poll_interval = 20, }; static struct platform_device alix_buttons_dev = { .name = "gpio-keys-polled", .id = 1, .dev = { .platform_data = &alix_buttons_data, } }; static struct gpio_led alix_leds[] = { { .name = "alix:1", .gpio = 6, .default_trigger = "default-on", .active_low = 1, }, { .name = "alix:2", .gpio = 25, .default_trigger = "default-off", .active_low = 1, }, { 
.name = "alix:3", .gpio = 27, .default_trigger = "default-off", .active_low = 1, }, }; static struct gpio_led_platform_data alix_leds_data = { .num_leds = ARRAY_SIZE(alix_leds), .leds = alix_leds, }; static struct platform_device alix_leds_dev = { .name = "leds-gpio", .id = -1, .dev.platform_data = &alix_leds_data, }; static struct __initdata platform_device *alix_devs[] = { &alix_buttons_dev, &alix_leds_dev, }; static void __init register_alix(void) { /* Setup LED control through leds-gpio driver */ platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs)); } static bool __init alix_present(unsigned long bios_phys, const char *alix_sig, size_t alix_sig_len) { const size_t bios_len = BIOS_REGION_SIZE; const char *bios_virt; const char *scan_end; const char *p; char name[64]; if (force) { printk(KERN_NOTICE "%s: forced to skip BIOS test, " "assume system is ALIX.2/ALIX.3\n", KBUILD_MODNAME); return true; } bios_virt = phys_to_virt(bios_phys); scan_end = bios_virt + bios_len - (alix_sig_len + 2); for (p = bios_virt; p < scan_end; p++) { const char *tail; char *a; if (memcmp(p, alix_sig, alix_sig_len) != 0) continue; memcpy(name, p, sizeof(name)); /* remove the first \0 character from string */ a = strchr(name, '\0'); if (a) *a = ' '; /* cut the string at a newline */ a = strchr(name, '\r'); if (a) *a = '\0'; tail = p + alix_sig_len; if ((tail[0] == '2' || tail[0] == '3' || tail[0] == '6')) { printk(KERN_INFO "%s: system is recognized as \"%s\"\n", KBUILD_MODNAME, name); return true; } } return false; } static bool __init alix_present_dmi(void) { const char *vendor, *product; vendor = dmi_get_system_info(DMI_SYS_VENDOR); if (!vendor || strcmp(vendor, "PC Engines")) return false; product = dmi_get_system_info(DMI_PRODUCT_NAME); if (!product || (strcmp(product, "ALIX.2D") && strcmp(product, "ALIX.6"))) return false; printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n", KBUILD_MODNAME, vendor, product); return true; } static int __init alix_init(void) { const char 
tinybios_sig[] = "PC Engines ALIX."; const char coreboot_sig[] = "PC Engines\0ALIX."; if (!is_geode()) return 0; if (alix_present(BIOS_SIGNATURE_TINYBIOS, tinybios_sig, sizeof(tinybios_sig) - 1) || alix_present(BIOS_SIGNATURE_COREBOOT, coreboot_sig, sizeof(coreboot_sig) - 1) || alix_present_dmi()) register_alix(); return 0; } module_init(alix_init); MODULE_AUTHOR("Ed Wildgoose <kernel@wildgooses.com>"); MODULE_DESCRIPTION("PCEngines ALIX System Setup"); MODULE_LICENSE("GPL");
gpl-2.0
SlimRoms/kernel_samsung_d2
arch/mips/loongson/common/cs5536/cs5536_pci.c
8740
2293
/* * read/write operation to the PCI config space of CS5536 * * Copyright (C) 2007 Lemote, Inc. * Author : jlliu, liujl@lemote.com * * Copyright (C) 2009 Lemote, Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * the Virtual Support Module(VSM) for virtulizing the PCI * configure space are defined in cs5536_modulename.c respectively, * * after this virtulizing, user can access the PCI configure space * directly as a normal multi-function PCI device which follows * the PCI-2.2 spec. */ #include <linux/types.h> #include <cs5536/cs5536_vsm.h> enum { CS5536_FUNC_START = -1, CS5536_ISA_FUNC, reserved_func, CS5536_IDE_FUNC, CS5536_ACC_FUNC, CS5536_OHCI_FUNC, CS5536_EHCI_FUNC, CS5536_FUNC_END, }; static const cs5536_pci_vsm_write vsm_conf_write[] = { [CS5536_ISA_FUNC] pci_isa_write_reg, [reserved_func] NULL, [CS5536_IDE_FUNC] pci_ide_write_reg, [CS5536_ACC_FUNC] pci_acc_write_reg, [CS5536_OHCI_FUNC] pci_ohci_write_reg, [CS5536_EHCI_FUNC] pci_ehci_write_reg, }; static const cs5536_pci_vsm_read vsm_conf_read[] = { [CS5536_ISA_FUNC] pci_isa_read_reg, [reserved_func] NULL, [CS5536_IDE_FUNC] pci_ide_read_reg, [CS5536_ACC_FUNC] pci_acc_read_reg, [CS5536_OHCI_FUNC] pci_ohci_read_reg, [CS5536_EHCI_FUNC] pci_ehci_read_reg, }; /* * write to PCI config space and transfer it to MSR write. */ void cs5536_pci_conf_write4(int function, int reg, u32 value) { if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END)) return; if ((reg < 0) || (reg > 0x100) || ((reg & 0x03) != 0)) return; if (vsm_conf_write[function] != NULL) vsm_conf_write[function](reg, value); } /* * read PCI config space and transfer it to MSR access. 
*/ u32 cs5536_pci_conf_read4(int function, int reg) { u32 data = 0; if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END)) return 0; if ((reg < 0) || ((reg & 0x03) != 0)) return 0; if (reg > 0x100) return 0xffffffff; if (vsm_conf_read[function] != NULL) data = vsm_conf_read[function](reg); return data; }
gpl-2.0
SlimSaber/kernel_sony_msm8974
arch/mips/loongson/lemote-2f/machtype.c
8740
1639
/*
 * Copyright (C) 2009 Lemote Inc.
 * Author: Wu Zhangjin, wuzhangjin@gmail.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/bootinfo.h>

#include <loongson.h>

/*
 * Derive the machine type from the PMON_VER= boot argument when no
 * explicit machtype= was passed, then append one so later code can
 * rely on it being present.
 *
 * PMON_VER=LM8089	Lemote 8.9'' netbook
 *	   LM8101	Lemote 10.1'' netbook
 *	   (both netbooks share the same kernel support)
 *	   LM6XXX	Lemote FuLoong(2F) box series
 *	   LM9XXX	Lemote LynLoong PC series
 * Anything else starting with "PMON_VER=LM" is treated as the NAS.
 */
void __init mach_prom_init_machtype(void)
{
	/* Nothing to do unless PMON identified a Lemote 2F machine. */
	if (!strstr(arcs_cmdline, "PMON_VER=LM"))
		return;

	if (strstr(arcs_cmdline, "PMON_VER=LM8"))
		mips_machtype = MACH_LEMOTE_YL2F89;
	else if (strstr(arcs_cmdline, "PMON_VER=LM6"))
		mips_machtype = MACH_LEMOTE_FL2F;
	else if (strstr(arcs_cmdline, "PMON_VER=LM9"))
		mips_machtype = MACH_LEMOTE_LL2F;
	else
		mips_machtype = MACH_LEMOTE_NAS;

	/* Make the decision visible to the rest of the command line. */
	strcat(arcs_cmdline, " machtype=");
	strcat(arcs_cmdline, get_system_type());
	strcat(arcs_cmdline, " ");
}
gpl-2.0
sdnfv/onvm-dpdk
drivers/net/e1000/base/e1000_82541.c
37
35967
/******************************************************************************* Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /* * 82541EI Gigabit Ethernet Controller * 82541ER Gigabit Ethernet Controller * 82541GI Gigabit Ethernet Controller * 82541PI Gigabit Ethernet Controller * 82547EI Gigabit Ethernet Controller * 82547GI Gigabit Ethernet Controller */ #include "e1000_api.h" STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw); STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw); STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw); STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw); STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw); STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, u16 *duplex); STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw); STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw); STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw); STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw); STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active); STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw); STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw); STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw); STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, bool link_up); STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw); STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw); STATIC const u16 e1000_igp_cable_length_table[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 
120, 120, 120}; #define IGP01E1000_AGC_LENGTH_TABLE_SIZE \ (sizeof(e1000_igp_cable_length_table) / \ sizeof(e1000_igp_cable_length_table[0])) /** * e1000_init_phy_params_82541 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ STATIC s32 e1000_init_phy_params_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("e1000_init_phy_params_82541"); phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 10000; phy->type = e1000_phy_igp; /* Function Pointers */ phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; phy->ops.get_cable_length = e1000_get_cable_length_igp_82541; phy->ops.get_cfg_done = e1000_get_cfg_done_generic; phy->ops.get_info = e1000_get_phy_info_igp; phy->ops.read_reg = e1000_read_phy_reg_igp; phy->ops.reset = e1000_phy_hw_reset_82541; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541; phy->ops.write_reg = e1000_write_phy_reg_igp; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82541; ret_val = e1000_get_phy_id(hw); if (ret_val) goto out; /* Verify phy id */ if (phy->id != IGP01E1000_I_PHY_ID) { ret_val = -E1000_ERR_PHY; goto out; } out: return ret_val; } /** * e1000_init_nvm_params_82541 - Init NVM func ptrs. 
* @hw: pointer to the HW structure **/ STATIC s32 e1000_init_nvm_params_82541(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val = E1000_SUCCESS; u32 eecd = E1000_READ_REG(hw, E1000_EECD); u16 size; DEBUGFUNC("e1000_init_nvm_params_82541"); switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->type = e1000_nvm_eeprom_spi; eecd |= E1000_EECD_ADDR_BITS; break; case e1000_nvm_override_spi_small: nvm->type = e1000_nvm_eeprom_spi; eecd &= ~E1000_EECD_ADDR_BITS; break; case e1000_nvm_override_microwire_large: nvm->type = e1000_nvm_eeprom_microwire; eecd |= E1000_EECD_SIZE; break; case e1000_nvm_override_microwire_small: nvm->type = e1000_nvm_eeprom_microwire; eecd &= ~E1000_EECD_SIZE; break; default: nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi : e1000_nvm_eeprom_microwire; break; } if (nvm->type == e1000_nvm_eeprom_spi) { nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8; nvm->delay_usec = 1; nvm->opcode_bits = 8; nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8; /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_generic; nvm->ops.read = e1000_read_nvm_spi; nvm->ops.release = e1000_release_nvm_generic; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.write = e1000_write_nvm_spi; /* * nvm->word_size must be discovered after the pointers * are set so we can verify the size from the nvm image * itself. Temporarily set it to a dummy value so the * read will work. */ nvm->word_size = 64; ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size); if (ret_val) goto out; size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT; /* * if size != 0, it can be added to a constant and become * the left-shift value to set the word_size. Otherwise, * word_size stays at 64. 
*/ if (size) { size += NVM_WORD_SIZE_BASE_SHIFT_82541; nvm->word_size = 1 << size; } } else { nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6; nvm->delay_usec = 50; nvm->opcode_bits = 3; nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 256 : 64; /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_generic; nvm->ops.read = e1000_read_nvm_microwire; nvm->ops.release = e1000_release_nvm_generic; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.write = e1000_write_nvm_microwire; } out: return ret_val; } /** * e1000_init_mac_params_82541 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ STATIC s32 e1000_init_mac_params_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_82541"); /* Set media type */ hw->phy.media_type = e1000_media_type_copper; /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Function Pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_single_port; /* reset */ mac->ops.reset_hw = e1000_reset_hw_82541; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_82541; /* link setup */ mac->ops.setup_link = e1000_setup_link_generic; /* physical interface link setup */ mac->ops.setup_physical_interface = e1000_setup_copper_link_82541; /* check for link */ mac->ops.check_for_link = e1000_check_for_link_82541; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_82541; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_generic; /* clearing VFTA */ mac->ops.clear_vfta = 
e1000_clear_vfta_generic; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* setup LED */ mac->ops.setup_led = e1000_setup_led_82541; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_82541; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_generic; mac->ops.led_off = e1000_led_off_generic; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541; return E1000_SUCCESS; } /** * e1000_init_function_pointers_82541 - Init func ptrs. * @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82541(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82541"); hw->mac.ops.init_params = e1000_init_mac_params_82541; hw->nvm.ops.init_params = e1000_init_nvm_params_82541; hw->phy.ops.init_params = e1000_init_phy_params_82541; } /** * e1000_reset_hw_82541 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ STATIC s32 e1000_reset_hw_82541(struct e1000_hw *hw) { u32 ledctl, ctrl, manc; DEBUGFUNC("e1000_reset_hw_82541"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); /* * Delay to allow any outstanding PCI transactions to complete * before resetting the device. */ msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Must reset the Phy before resetting the MAC */ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST)); E1000_WRITE_FLUSH(hw); msec_delay(5); } DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n"); switch (hw->mac.type) { case e1000_82541: case e1000_82541_rev_2: /* * These controllers can't ack the 64-bit write when * issuing the reset, so we use IO-mapping as a * workaround to issue the reset. 
*/ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); break; default: E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); break; } /* Wait for NVM reload */ msec_delay(20); /* Disable HW ARPs on ASF enabled adapters */ manc = E1000_READ_REG(hw, E1000_MANC); manc &= ~E1000_MANC_ARP_EN; E1000_WRITE_REG(hw, E1000_MANC, manc); if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { e1000_phy_init_script_82541(hw); /* Configure activity LED after Phy reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); } /* Once again, mask the interrupts */ DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); /* Clear any pending interrupt events. */ E1000_READ_REG(hw, E1000_ICR); return E1000_SUCCESS; } /** * e1000_init_hw_82541 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ STATIC s32 e1000_init_hw_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; u32 i, txdctl; s32 ret_val; DEBUGFUNC("e1000_init_hw_82541"); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); if (ret_val) { DEBUGOUT("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Storing the Speed Power Down value for later use */ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, &dev_spec->spd_default); if (ret_val) goto out; /* Disabling VLAN filtering */ DEBUGOUT("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); /* Setup the receive address. 
*/ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) { E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* * Avoid back to back register writes by adding the register * read (flush). This is to protect against some strange * bridge configurations that may issue Memory Write Block * (MWB) to our register space. */ E1000_WRITE_FLUSH(hw); } /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82541(hw); out: return ret_val; } /** * e1000_get_link_up_info_82541 - Report speed and duplex * @hw: pointer to the HW structure * @speed: pointer to speed buffer * @duplex: pointer to duplex buffer * * Retrieve the current speed and duplex configuration. **/ STATIC s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, u16 *duplex) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_get_link_up_info_82541"); ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); if (ret_val) goto out; if (!phy->speed_downgraded) goto out; /* * IGP01 PHY may advertise full duplex operation after speed * downgrade even if it is operating at half duplex. * Here we set the duplex settings to match the duplex in the * link partner's capabilities. 
*/ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data); if (ret_val) goto out; if (!(data & NWAY_ER_LP_NWAY_CAPS)) { *duplex = HALF_DUPLEX; } else { ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data); if (ret_val) goto out; if (*speed == SPEED_100) { if (!(data & NWAY_LPAR_100TX_FD_CAPS)) *duplex = HALF_DUPLEX; } else if (*speed == SPEED_10) { if (!(data & NWAY_LPAR_10T_FD_CAPS)) *duplex = HALF_DUPLEX; } } out: return ret_val; } /** * e1000_phy_hw_reset_82541 - PHY hardware reset * @hw: pointer to the HW structure * * Verify the reset block is not blocking us from resetting. Acquire * semaphore (if necessary) and read/set/write the device control reset * bit in the PHY. Wait the appropriate delay time for the device to * reset and release the semaphore (if necessary). **/ STATIC s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw) { s32 ret_val; u32 ledctl; DEBUGFUNC("e1000_phy_hw_reset_82541"); ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) goto out; e1000_phy_init_script_82541(hw); if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { /* Configure activity LED after PHY reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); } out: return ret_val; } /** * e1000_setup_copper_link_82541 - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). 
**/ STATIC s32 e1000_setup_copper_link_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; u32 ctrl, ledctl; DEBUGFUNC("e1000_setup_copper_link_82541"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Earlier revs of the IGP phy require us to force MDI. */ if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) { dev_spec->dsp_config = e1000_dsp_config_disabled; phy->mdix = 1; } else { dev_spec->dsp_config = e1000_dsp_config_enabled; } ret_val = e1000_copper_link_setup_igp(hw); if (ret_val) goto out; if (hw->mac.autoneg) { if (dev_spec->ffe_config == e1000_ffe_config_active) dev_spec->ffe_config = e1000_ffe_config_enabled; } /* Configure activity LED after Phy reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); ret_val = e1000_setup_copper_link_generic(hw); out: return ret_val; } /** * e1000_check_for_link_82541 - Check/Store link connection * @hw: pointer to the HW structure * * This checks the link condition of the adapter and stores the * results in the hw->mac structure. **/ STATIC s32 e1000_check_for_link_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; DEBUGFUNC("e1000_check_for_link_82541"); /* * We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) { ret_val = E1000_SUCCESS; goto out; } /* * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. 
*/ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) goto out; if (!link) { ret_val = e1000_config_dsp_after_link_change_82541(hw, false); goto out; /* No link detected */ } mac->get_link_status = false; /* * Check if there was DownShift, must be checked * immediately after link-up */ e1000_check_downshift_generic(hw); /* * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) { ret_val = -E1000_ERR_CONFIG; goto out; } ret_val = e1000_config_dsp_after_link_change_82541(hw, true); /* * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* * Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); out: return ret_val; } /** * e1000_config_dsp_after_link_change_82541 - Config DSP after link * @hw: pointer to the HW structure * @link_up: boolean flag for link up status * * Return E1000_ERR_PHY when failing to read/write the PHY, else E1000_SUCCESS * at any other case. * * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a * gigabit link is achieved to improve link quality. 
**/ STATIC s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, bool link_up) { struct e1000_phy_info *phy = &hw->phy; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; u32 idle_errs = 0; u16 phy_data, phy_saved_data, speed, duplex, i; u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { IGP01E1000_PHY_AGC_PARAM_A, IGP01E1000_PHY_AGC_PARAM_B, IGP01E1000_PHY_AGC_PARAM_C, IGP01E1000_PHY_AGC_PARAM_D}; DEBUGFUNC("e1000_config_dsp_after_link_change_82541"); if (link_up) { ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); goto out; } if (speed != SPEED_1000) { ret_val = E1000_SUCCESS; goto out; } ret_val = phy->ops.get_cable_length(hw); if (ret_val) goto out; if ((dev_spec->dsp_config == e1000_dsp_config_enabled) && phy->min_cable_length >= 50) { for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, dsp_reg_array[i], &phy_data); if (ret_val) goto out; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; ret_val = phy->ops.write_reg(hw, dsp_reg_array[i], phy_data); if (ret_val) goto out; } dev_spec->dsp_config = e1000_dsp_config_activated; } if ((dev_spec->ffe_config != e1000_ffe_config_enabled) || (phy->min_cable_length >= 50)) { ret_val = E1000_SUCCESS; goto out; } /* clear previous idle error counts */ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) goto out; for (i = 0; i < ffe_idle_err_timeout; i++) { usec_delay(1000); ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) goto out; idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { dev_spec->ffe_config = e1000_ffe_config_active; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_CM_CP); if (ret_val) goto out; break; } if (idle_errs) ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100; } } else { if 
(dev_spec->dsp_config == e1000_dsp_config_activated) { /* * Save off the current value of register 0x2F5B * to be restored at the end of the routines. */ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); if (ret_val) goto out; /* Disable the PHY transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); if (ret_val) goto out; msec_delay_irq(20); ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIG); if (ret_val) goto out; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, dsp_reg_array[i], &phy_data); if (ret_val) goto out; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; ret_val = phy->ops.write_reg(hw, dsp_reg_array[i], phy_data); if (ret_val) goto out; } ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) goto out; msec_delay_irq(20); /* Now enable the transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); if (ret_val) goto out; dev_spec->dsp_config = e1000_dsp_config_enabled; } if (dev_spec->ffe_config != e1000_ffe_config_active) { ret_val = E1000_SUCCESS; goto out; } /* * Save off the current value of register 0x2F5B * to be restored at the end of the routines. 
*/ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); if (ret_val) goto out; /* Disable the PHY transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); if (ret_val) goto out; msec_delay_irq(20); ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIG); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_DEFAULT); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) goto out; msec_delay_irq(20); /* Now enable the transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); if (ret_val) goto out; dev_spec->ffe_config = e1000_ffe_config_enabled; } out: return ret_val; } /** * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the * received signal, adjusting for the attenuation produced by the * cable. By reading the AGC registers, which represent the * combination of coarse and fine gain value, the value can be put * into a lookup table to obtain the approximate cable length * for each channel. 
**/ STATIC s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 i, data; u16 cur_agc_value, agc_value = 0; u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A, IGP01E1000_PHY_AGC_B, IGP01E1000_PHY_AGC_C, IGP01E1000_PHY_AGC_D}; DEBUGFUNC("e1000_get_cable_length_igp_82541"); /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data); if (ret_val) goto out; cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT; /* Bounds checking */ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || (cur_agc_value == 0)) { ret_val = -E1000_ERR_PHY; goto out; } agc_value += cur_agc_value; if (min_agc_value > cur_agc_value) min_agc_value = cur_agc_value; } /* Remove the minimal AGC result for length < 50m */ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) { agc_value -= min_agc_value; /* Average the three remaining channels for the length. */ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); } else { /* Average the channels for the length. */ agc_value /= IGP01E1000_PHY_CHANNEL_NUM; } phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] > IGP01E1000_AGC_RANGE) ? (e1000_igp_cable_length_table[agc_value] - IGP01E1000_AGC_RANGE) : 0; phy->max_cable_length = e1000_igp_cable_length_table[agc_value] + IGP01E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; out: return ret_val; } /** * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is true, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. 
LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. **/ STATIC s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_82541"); switch (hw->mac.type) { case e1000_82541_rev_2: case e1000_82547_rev_2: break; default: ret_val = e1000_set_d3_lplu_state_generic(hw, active); goto out; break; } ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data); if (ret_val) goto out; if (!active) { data &= ~IGP01E1000_GMII_FLEX_SPD; ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); if (ret_val) goto out; /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP01E1000_GMII_FLEX_SPD; ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = 
/* NOTE(review): the next few lines are the tail of a function whose start
 * lies before this chunk; kept byte-identical so the file stays valid. */
		phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

out:
	return ret_val;
}

/**
 * e1000_setup_led_82541 - Configures SW controllable LED
 * @hw: pointer to the HW structure
 *
 * This prepares the SW controllable LED for use and saves the current state
 * of the LED so it can be later restored.
 **/
STATIC s32 e1000_setup_led_82541(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_led_82541");

	/* Save the current GMII FIFO register value so cleanup_led can
	 * restore it later. */
	ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO,
				       &dev_spec->spd_default);
	if (ret_val)
		goto out;

	/* Clear the speed indication bits to hand the LED to SW control. */
	ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
					(u16)(dev_spec->spd_default &
					      ~IGP01E1000_GMII_SPD));
	if (ret_val)
		goto out;

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);

out:
	return ret_val;
}

/**
 * e1000_cleanup_led_82541 - Set LED config to default operation
 * @hw: pointer to the HW structure
 *
 * Remove the current LED configuration and set the LED configuration
 * to the default value, saved from the EEPROM.
 **/
STATIC s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
	s32 ret_val;

	DEBUGFUNC("e1000_cleanup_led_82541");

	/* Restore the GMII FIFO value captured in e1000_setup_led_82541. */
	ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO,
					dev_spec->spd_default);
	if (ret_val)
		goto out;

	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);

out:
	return ret_val;
}

/**
 * e1000_phy_init_script_82541 - Initialize GbE PHY
 * @hw: pointer to the HW structure
 *
 * Initializes the IGP PHY.
 **/
STATIC s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;
	/* NOTE(review): ret_val is declared u32 but the function returns s32;
	 * also the read_reg of 0x2F5B below is assigned but its status is
	 * never checked before continuing — worth confirming upstream. */
	u32 ret_val;
	u16 phy_saved_data;

	DEBUGFUNC("e1000_phy_init_script_82541");

	/* Skip the whole script when the driver disabled it via
	 * e1000_init_script_state_82541(). */
	if (!dev_spec->phy_init_script) {
		ret_val = E1000_SUCCESS;
		goto out;
	}

	/* Delay after phy reset to enable NVM configuration to load */
	msec_delay(20);

	/*
	 * Save off the current value of register 0x2F5B to be restored at
	 * the end of this routine.
	 */
	ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data);

	/* Disabled the PHY transmitter */
	hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003);

	msec_delay(20);

	hw->phy.ops.write_reg(hw, 0x0000, 0x0140);

	msec_delay(5);

	/* Per-revision undocumented PHY register fixups (values come from
	 * the vendor init script, not from a public datasheet). */
	switch (hw->mac.type) {
	case e1000_82541:
	case e1000_82547:
		hw->phy.ops.write_reg(hw, 0x1F95, 0x0001);
		hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21);
		hw->phy.ops.write_reg(hw, 0x1F79, 0x0018);
		hw->phy.ops.write_reg(hw, 0x1F30, 0x1600);
		hw->phy.ops.write_reg(hw, 0x1F31, 0x0014);
		hw->phy.ops.write_reg(hw, 0x1F32, 0x161C);
		hw->phy.ops.write_reg(hw, 0x1F94, 0x0003);
		hw->phy.ops.write_reg(hw, 0x1F96, 0x003F);
		hw->phy.ops.write_reg(hw, 0x2010, 0x0008);
		break;
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy.ops.write_reg(hw, 0x1F73, 0x0099);
		break;
	default:
		break;
	}

	hw->phy.ops.write_reg(hw, 0x0000, 0x3300);

	msec_delay(20);

	/* Now enable the transmitter */
	hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data);

	if (hw->mac.type == e1000_82547) {
		u16 fused, fine, coarse;

		/* Move to analog registers page */
		hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
				     &fused);

		/* If the spare fuse was not blown, compensate the analog
		 * fuse values in software. */
		if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
			hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS,
					     &fused);

			fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
			coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;

			if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
				coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
				fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
			} else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
				fine -= IGP01E1000_ANALOG_FUSE_FINE_10;

			fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
				(fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
				(coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);

			hw->phy.ops.write_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL,
					      fused);
			hw->phy.ops.write_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
					IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
		}
	}

out:
	return ret_val;
}

/**
 * e1000_init_script_state_82541 - Enable/Disable PHY init script
 * @hw: pointer to the HW structure
 * @state: boolean value used to enable/disable PHY init script
 *
 * Allows the driver to enable/disable the PHY init script, if the PHY is an
 * IGP PHY.
 **/
void e1000_init_script_state_82541(struct e1000_hw *hw, bool state)
{
	struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541;

	DEBUGFUNC("e1000_init_script_state_82541");

	/* The init script only applies to IGP PHYs. */
	if (hw->phy.type != e1000_phy_igp) {
		DEBUGOUT("Initialization script not necessary.\n");
		goto out;
	}

	dev_spec->phy_init_script = state;

out:
	return;
}

/**
 * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
STATIC void e1000_power_down_phy_copper_82541(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
STATIC void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_82541");

	e1000_clear_hw_cntrs_base_generic(hw);

	/* These statistics registers are clear-on-read; the reads below
	 * discard the values purely to reset the counters. */
	E1000_READ_REG(hw, E1000_PRC64);
	E1000_READ_REG(hw, E1000_PRC127);
	E1000_READ_REG(hw, E1000_PRC255);
	E1000_READ_REG(hw, E1000_PRC511);
	E1000_READ_REG(hw, E1000_PRC1023);
	E1000_READ_REG(hw, E1000_PRC1522);
	E1000_READ_REG(hw, E1000_PTC64);
	E1000_READ_REG(hw, E1000_PTC127);
	E1000_READ_REG(hw, E1000_PTC255);
	E1000_READ_REG(hw, E1000_PTC511);
	E1000_READ_REG(hw, E1000_PTC1023);
	E1000_READ_REG(hw, E1000_PTC1522);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);
}
gpl-2.0
rmcilroy/Privgrind
memcheck/tests/origin6-fp.c
37
2550
/* Test of origin tracking through floating point code and in the case where there are large amounts of uninitialised data floating around. This program creates 3 matrices of 2300x2300 doubles, makes one value in them undefined, does arithmetic, and tests the result, which is then undefined. This also tests the secondary otag cache (ocacheL2), since the amount of uninitialised data is somewhat over 43MB and it appears that quite a lot of non-zero-otag lines are pushed into ocacheL2. This program needs to be compiled with -O. */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include "../memcheck.h" double** alloc_square_array ( int nArr ) { int i; double** vec; assert(nArr >= 1); vec = malloc(nArr * sizeof(double*)); assert(vec); for (i = 0; i < nArr; i++) { vec[i] = malloc(nArr * sizeof(double)); assert(vec); } return vec; } double** do3x3smooth ( double** arr, int nArr ) { int i, j; double** out; assert(nArr >= 3); out = alloc_square_array(nArr - 2); assert(out); for (i = 1; i < nArr-1; i++) { for (j = 1; j < nArr-1; j++) { double s = arr[i-1][j-1] + arr[i-1][j ] + arr[i-1][j+1] + arr[i ][j-1] + arr[i ][j ] + arr[i ][j+1] + arr[i+1][j-1] + arr[i+1][j ] + arr[i+1][j+1]; out[i-1][j-1] = s / 9.0; } } return out; } double sum ( double** arr, int nArr ) { int i, j; double s = 0.0; assert(nArr >= 1); for (i = 0; i < nArr; i++) { for (j = 0; j < nArr; j++) { s += arr[i][j]; } } return s; } void setup_arr ( /*OUT*/double** arr, int nArr ) { int i, j; assert(nArr >= 1); for (i = 0; i < nArr; i++) { for (j = 0; j < nArr; j++) { arr[i][j] = (double)(i * j); if (i == nArr/2 && j == nArr/2) { unsigned char* p = (unsigned char*)&arr[i][j]; VALGRIND_MAKE_MEM_UNDEFINED(p, 1); } } } } int main ( void ) { int nArr = 2300; int ri; double r, **arr, **arr2, **arr3; arr = alloc_square_array(nArr); setup_arr( arr, nArr ); arr2 = do3x3smooth( arr, nArr ); arr3 = do3x3smooth( arr2, nArr-2 ); r = sum( arr3, nArr-4 ); /* Convert answer to int before testing it, so as to 
guarantee there's only one conditional branch. */ if (0) fprintf(stderr, "r = %g\n", r ); r /= 10000.0; ri = (int)r; if (0) fprintf(stderr, "ri = %d\n", ri); if (ri == 696565111) { fprintf(stderr, "Test succeeded.\n"); } else { fprintf(stderr, "Test FAILED !\n"); assert(0); } return 0; }
gpl-2.0
cricard13/linux-raspberry-nfc
drivers/power/pda_power.c
1061
11917
/*
 * Common power driver for PDAs and phones with one or two external
 * power supplies (AC/USB) connected to main and backup batteries,
 * and optional builtin charger.
 *
 * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/pda_power.h>
#include <linux/regulator/consumer.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/usb/otg.h>

/* Build IRQ flags from a platform IRQ resource: shared line plus any
 * trigger flags the resource carries. */
static inline unsigned int get_irq_flags(struct resource *res)
{
	return IRQF_SHARED | (res->flags & IRQF_TRIGGER_MASK);
}

/* Driver-global state.  This driver can only be instantiated once
 * (probe rejects pdev->id != -1), so file-scope statics are used
 * instead of per-device state. */
static struct device *dev;
static struct pda_power_pdata *pdata;
static struct resource *ac_irq, *usb_irq;
static struct timer_list charger_timer;	/* debounce: IRQ -> status read */
static struct timer_list supply_timer;	/* debounce: charger set -> notify */
static struct timer_list polling_timer;	/* used when no IRQs are available */
static int polling;
static struct power_supply *pda_psy_ac, *pda_psy_usb;

#if IS_ENABLED(CONFIG_USB_PHY)
static struct usb_phy *transceiver;
static struct notifier_block otg_nb;
#endif

static struct regulator *ac_draw;

enum {
	PDA_PSY_OFFLINE = 0,
	PDA_PSY_ONLINE = 1,
	PDA_PSY_TO_CHANGE,
};

/* -1 means "not yet sampled"; otherwise one of the PDA_PSY_* values. */
static int new_ac_status = -1;
static int new_usb_status = -1;
static int ac_status = -1;
static int usb_status = -1;

/* power_supply get_property callback: reports only ONLINE, delegating
 * to the platform-provided is_ac_online/is_usb_online hooks. */
static int pda_power_get_property(struct power_supply *psy,
				  enum power_supply_property psp,
				  union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		if (psy->desc->type == POWER_SUPPLY_TYPE_MAINS)
			val->intval = pdata->is_ac_online ?
				      pdata->is_ac_online() : 0;
		else
			val->intval = pdata->is_usb_online ?
				      pdata->is_usb_online() : 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static enum power_supply_property pda_power_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
};

static char *pda_power_supplied_to[] = {
	"main-battery",
	"backup-battery",
};

static const struct power_supply_desc pda_psy_ac_desc = {
	.name = "ac",
	.type = POWER_SUPPLY_TYPE_MAINS,
	.properties = pda_power_props,
	.num_properties = ARRAY_SIZE(pda_power_props),
	.get_property = pda_power_get_property,
};

static const struct power_supply_desc pda_psy_usb_desc = {
	.name = "usb",
	.type = POWER_SUPPLY_TYPE_USB,
	.properties = pda_power_props,
	.num_properties = ARRAY_SIZE(pda_power_props),
	.get_property = pda_power_get_property,
};

/* Sample the platform online hooks into new_{ac,usb}_status. */
static void update_status(void)
{
	if (pdata->is_ac_online)
		new_ac_status = !!pdata->is_ac_online();

	if (pdata->is_usb_online)
		new_usb_status = !!pdata->is_usb_online();
}

/* Drive the charger: either via the platform set_charge() hook
 * (AC preferred over USB), or by enabling/disabling the ac_draw
 * regulator with the configured current limit. */
static void update_charger(void)
{
	static int regulator_enabled;
	int max_uA = pdata->ac_max_uA;

	if (pdata->set_charge) {
		if (new_ac_status > 0) {
			dev_dbg(dev, "charger on (AC)\n");
			pdata->set_charge(PDA_POWER_CHARGE_AC);
		} else if (new_usb_status > 0) {
			dev_dbg(dev, "charger on (USB)\n");
			pdata->set_charge(PDA_POWER_CHARGE_USB);
		} else {
			dev_dbg(dev, "charger off\n");
			pdata->set_charge(0);
		}
	} else if (ac_draw) {
		if (new_ac_status > 0) {
			regulator_set_current_limit(ac_draw, max_uA, max_uA);
			if (!regulator_enabled) {
				dev_dbg(dev, "charger on (AC)\n");
				WARN_ON(regulator_enable(ac_draw));
				regulator_enabled = 1;
			}
		} else {
			if (regulator_enabled) {
				dev_dbg(dev, "charger off\n");
				WARN_ON(regulator_disable(ac_draw));
				regulator_enabled = 0;
			}
		}
	}
}

/* Deferred notification: commit pending status changes and tell the
 * power_supply core, after the post-charger-set settling delay. */
static void supply_timer_func(unsigned long unused)
{
	if (ac_status == PDA_PSY_TO_CHANGE) {
		ac_status = new_ac_status;
		power_supply_changed(pda_psy_ac);
	}

	if (usb_status == PDA_PSY_TO_CHANGE) {
		usb_status = new_usb_status;
		power_supply_changed(pda_psy_usb);
	}
}

static void psy_changed(void)
{
	update_charger();

	/*
	 * Okay, charger set.  Now wait a bit before notifying supplicants,
	 * charge power should stabilize.
	 */
	mod_timer(&supply_timer,
		  jiffies + msecs_to_jiffies(pdata->wait_for_charger));
}

/* Fires wait_for_status ms after an IRQ/OTG event: re-read the lines
 * and propagate the change. */
static void charger_timer_func(unsigned long unused)
{
	update_status();
	psy_changed();
}

static irqreturn_t power_changed_isr(int irq, void *power_supply)
{
	if (power_supply == pda_psy_ac)
		ac_status = PDA_PSY_TO_CHANGE;
	else if (power_supply == pda_psy_usb)
		usb_status = PDA_PSY_TO_CHANGE;
	else
		return IRQ_NONE;

	/*
	 * Wait a bit before reading ac/usb line status and setting charger,
	 * because ac/usb status readings may lag from irq.
	 */
	mod_timer(&charger_timer,
		  jiffies + msecs_to_jiffies(pdata->wait_for_status));

	return IRQ_HANDLED;
}

/* Polling fallback for supplies with no IRQ resource: detect edges by
 * comparing the sampled status with the committed one. */
static void polling_timer_func(unsigned long unused)
{
	int changed = 0;

	dev_dbg(dev, "polling...\n");

	update_status();

	if (!ac_irq && new_ac_status != ac_status) {
		ac_status = PDA_PSY_TO_CHANGE;
		changed = 1;
	}

	if (!usb_irq && new_usb_status != usb_status) {
		usb_status = PDA_PSY_TO_CHANGE;
		changed = 1;
	}

	if (changed)
		psy_changed();

	mod_timer(&polling_timer,
		  jiffies + msecs_to_jiffies(pdata->polling_interval));
}

#if IS_ENABLED(CONFIG_USB_PHY)
/* Default online hooks derived from the USB transceiver's last event,
 * used when the platform does not provide its own. */
static int otg_is_usb_online(void)
{
	return (transceiver->last_event == USB_EVENT_VBUS ||
		transceiver->last_event == USB_EVENT_ENUMERATED);
}

static int otg_is_ac_online(void)
{
	return (transceiver->last_event == USB_EVENT_CHARGER);
}

static int otg_handle_notification(struct notifier_block *nb,
		unsigned long event, void *unused)
{
	switch (event) {
	case USB_EVENT_CHARGER:
		ac_status = PDA_PSY_TO_CHANGE;
		break;
	case USB_EVENT_VBUS:
	case USB_EVENT_ENUMERATED:
		usb_status = PDA_PSY_TO_CHANGE;
		break;
	case USB_EVENT_NONE:
		/* Cable gone: both supplies need re-evaluation. */
		ac_status = PDA_PSY_TO_CHANGE;
		usb_status = PDA_PSY_TO_CHANGE;
		break;
	default:
		return NOTIFY_OK;
	}

	/*
	 * Wait a bit before reading ac/usb line status and setting charger,
	 * because ac/usb status readings may lag from irq.
	 */
	mod_timer(&charger_timer,
		  jiffies + msecs_to_jiffies(pdata->wait_for_status));

	return NOTIFY_OK;
}
#endif

static int pda_power_probe(struct platform_device *pdev)
{
	struct power_supply_config psy_cfg = {};
	int ret = 0;

	dev = &pdev->dev;

	/* Global statics above only support a single instance. */
	if (pdev->id != -1) {
		dev_err(dev, "it's meaningless to register several "
			"pda_powers; use id = -1\n");
		ret = -EINVAL;
		goto wrongid;
	}

	/* NOTE(review): pdata is dereferenced below without a NULL check;
	 * probing without platform data would oops — confirm callers. */
	pdata = pdev->dev.platform_data;

	if (pdata->init) {
		ret = pdata->init(dev);
		if (ret < 0)
			goto init_failed;
	}

	/* ac_draw regulator is optional; fall back to set_charge(). */
	ac_draw = regulator_get(dev, "ac_draw");
	if (IS_ERR(ac_draw)) {
		dev_dbg(dev, "couldn't get ac_draw regulator\n");
		ac_draw = NULL;
	}

	update_status();
	update_charger();

	/* Defaults for unset platform parameters (ms / µA). */
	if (!pdata->wait_for_status)
		pdata->wait_for_status = 500;

	if (!pdata->wait_for_charger)
		pdata->wait_for_charger = 500;

	if (!pdata->polling_interval)
		pdata->polling_interval = 2000;

	if (!pdata->ac_max_uA)
		pdata->ac_max_uA = 500000;

	setup_timer(&charger_timer, charger_timer_func, 0);
	setup_timer(&supply_timer, supply_timer_func, 0);

	ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
	usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");

	if (pdata->supplied_to) {
		psy_cfg.supplied_to = pdata->supplied_to;
		psy_cfg.num_supplicants = pdata->num_supplicants;
	} else {
		psy_cfg.supplied_to = pda_power_supplied_to;
		psy_cfg.num_supplicants = ARRAY_SIZE(pda_power_supplied_to);
	}

#if IS_ENABLED(CONFIG_USB_PHY)
	transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
	if (!IS_ERR_OR_NULL(transceiver)) {
		if (!pdata->is_usb_online)
			pdata->is_usb_online = otg_is_usb_online;
		if (!pdata->is_ac_online)
			pdata->is_ac_online = otg_is_ac_online;
	}
#endif

	if (pdata->is_ac_online) {
		pda_psy_ac = power_supply_register(&pdev->dev,
						   &pda_psy_ac_desc, &psy_cfg);
		if (IS_ERR(pda_psy_ac)) {
			dev_err(dev, "failed to register %s power supply\n",
				pda_psy_ac_desc.name);
			ret = PTR_ERR(pda_psy_ac);
			goto ac_supply_failed;
		}

		if (ac_irq) {
			ret = request_irq(ac_irq->start, power_changed_isr,
					  get_irq_flags(ac_irq), ac_irq->name,
					  pda_psy_ac);
			if (ret) {
				dev_err(dev, "request ac irq failed\n");
				goto ac_irq_failed;
			}
		} else {
			polling = 1;
		}
	}

	if (pdata->is_usb_online) {
		pda_psy_usb = power_supply_register(&pdev->dev,
						    &pda_psy_usb_desc,
						    &psy_cfg);
		if (IS_ERR(pda_psy_usb)) {
			dev_err(dev, "failed to register %s power supply\n",
				pda_psy_usb_desc.name);
			ret = PTR_ERR(pda_psy_usb);
			goto usb_supply_failed;
		}

		if (usb_irq) {
			ret = request_irq(usb_irq->start, power_changed_isr,
					  get_irq_flags(usb_irq),
					  usb_irq->name, pda_psy_usb);
			if (ret) {
				dev_err(dev, "request usb irq failed\n");
				goto usb_irq_failed;
			}
		} else {
			polling = 1;
		}
	}

#if IS_ENABLED(CONFIG_USB_PHY)
	/* With an OTG notifier, events arrive asynchronously and polling
	 * is unnecessary. */
	if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
		otg_nb.notifier_call = otg_handle_notification;
		ret = usb_register_notifier(transceiver, &otg_nb);
		if (ret) {
			dev_err(dev, "failure to register otg notifier\n");
			goto otg_reg_notifier_failed;
		}
		polling = 0;
	}
#endif

	if (polling) {
		dev_dbg(dev, "will poll for status\n");
		setup_timer(&polling_timer, polling_timer_func, 0);
		mod_timer(&polling_timer,
			  jiffies + msecs_to_jiffies(pdata->polling_interval));
	}

	if (ac_irq || usb_irq)
		device_init_wakeup(&pdev->dev, 1);

	return 0;

	/* Unwind in reverse order of acquisition. */
#if IS_ENABLED(CONFIG_USB_PHY)
otg_reg_notifier_failed:
	if (pdata->is_usb_online && usb_irq)
		free_irq(usb_irq->start, pda_psy_usb);
#endif
usb_irq_failed:
	if (pdata->is_usb_online)
		power_supply_unregister(pda_psy_usb);
usb_supply_failed:
	if (pdata->is_ac_online && ac_irq)
		free_irq(ac_irq->start, pda_psy_ac);
#if IS_ENABLED(CONFIG_USB_PHY)
	if (!IS_ERR_OR_NULL(transceiver))
		usb_put_phy(transceiver);
#endif
ac_irq_failed:
	if (pdata->is_ac_online)
		power_supply_unregister(pda_psy_ac);
ac_supply_failed:
	if (ac_draw) {
		regulator_put(ac_draw);
		ac_draw = NULL;
	}
	if (pdata->exit)
		pdata->exit(dev);
init_failed:
wrongid:
	return ret;
}

static int pda_power_remove(struct platform_device *pdev)
{
	if (pdata->is_usb_online && usb_irq)
		free_irq(usb_irq->start, pda_psy_usb);
	if (pdata->is_ac_online && ac_irq)
		free_irq(ac_irq->start, pda_psy_ac);

	if (polling)
		del_timer_sync(&polling_timer);
	del_timer_sync(&charger_timer);
	del_timer_sync(&supply_timer);

	if (pdata->is_usb_online)
		power_supply_unregister(pda_psy_usb);
	if (pdata->is_ac_online)
		power_supply_unregister(pda_psy_ac);

#if IS_ENABLED(CONFIG_USB_PHY)
	if (!IS_ERR_OR_NULL(transceiver))
		usb_put_phy(transceiver);
#endif

	if (ac_draw) {
		regulator_put(ac_draw);
		ac_draw = NULL;
	}

	if (pdata->exit)
		pdata->exit(dev);

	return 0;
}

#ifdef CONFIG_PM
/* Remember which IRQs were actually armed as wake sources so resume
 * only disables what suspend enabled. */
static int ac_wakeup_enabled;
static int usb_wakeup_enabled;

static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
{
	if (pdata->suspend) {
		int ret = pdata->suspend(state);

		if (ret)
			return ret;
	}

	if (device_may_wakeup(&pdev->dev)) {
		if (ac_irq)
			ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
		if (usb_irq)
			usb_wakeup_enabled = !enable_irq_wake(usb_irq->start);
	}

	return 0;
}

static int pda_power_resume(struct platform_device *pdev)
{
	if (device_may_wakeup(&pdev->dev)) {
		if (usb_irq && usb_wakeup_enabled)
			disable_irq_wake(usb_irq->start);
		if (ac_irq && ac_wakeup_enabled)
			disable_irq_wake(ac_irq->start);
	}

	if (pdata->resume)
		return pdata->resume();

	return 0;
}
#else
#define pda_power_suspend NULL
#define pda_power_resume NULL
#endif /* CONFIG_PM */

static struct platform_driver pda_power_pdrv = {
	.driver = {
		.name = "pda-power",
	},
	.probe = pda_power_probe,
	.remove = pda_power_remove,
	.suspend = pda_power_suspend,
	.resume = pda_power_resume,
};

module_platform_driver(pda_power_pdrv);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
MODULE_ALIAS("platform:pda-power");
gpl-2.0
SeoDongMin/Gace_kernel
drivers/isdn/hardware/eicon/message.c
1061
487733
/* * Copyright (c) Eicon Networks, 2002. * This source file is supplied for the use with Eicon Networks range of DIVA Server Adapters. * Eicon File Revision : 2.1 * This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "platform.h" #include "di_defs.h" #include "pc.h" #include "capi20.h" #include "divacapi.h" #include "mdm_msg.h" #include "divasync.h" #define FILE_ "MESSAGE.C" #define dprintf /*------------------------------------------------------------------*/ /* This is options supported for all adapters that are server by */ /* XDI driver. 
Allo it is not necessary to ask it from every adapter*/ /* and it is not necessary to save it separate for every adapter */ /* Macrose defined here have only local meaning */ /*------------------------------------------------------------------*/ static dword diva_xdi_extended_features = 0; #define DIVA_CAPI_USE_CMA 0x00000001 #define DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR 0x00000002 #define DIVA_CAPI_XDI_PROVIDES_NO_CANCEL 0x00000004 #define DIVA_CAPI_XDI_PROVIDES_RX_DMA 0x00000008 /* CAPI can request to process all return codes self only if: protocol code supports this && xdi supports this */ #define DIVA_CAPI_SUPPORTS_NO_CANCEL(__a__) (((__a__)->manufacturer_features&MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)&& ((__a__)->manufacturer_features & MANUFACTURER_FEATURE_OK_FC_LABEL) && (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_NO_CANCEL)) /*------------------------------------------------------------------*/ /* local function prototypes */ /*------------------------------------------------------------------*/ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci); static void set_group_ind_mask (PLCI *plci); static void clear_group_ind_mask_bit (PLCI *plci, word b); static byte test_group_ind_mask_bit (PLCI *plci, word b); void AutomaticLaw(DIVA_CAPI_ADAPTER *); word CapiRelease(word); word CapiRegister(word); word api_put(APPL *, CAPI_MSG *); static word api_parse(byte *, word, byte *, API_PARSE *); static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out); static void api_load_msg(API_SAVE *in, API_PARSE *out); word api_remove_start(void); void api_remove_complete(void); static void plci_remove(PLCI *); static void diva_get_extended_adapter_features (DIVA_CAPI_ADAPTER * a); static void diva_ask_for_xdi_sdram_bar (DIVA_CAPI_ADAPTER *, IDI_SYNC_REQ *); void callback(ENTITY *); static void control_rc(PLCI *, byte, byte, byte, byte, byte); static void data_rc(PLCI *, byte); static void data_ack(PLCI *, byte); static void sig_ind(PLCI *); 
static void SendInfo(PLCI *, dword, byte * *, byte); static void SendSetupInfo(APPL *, PLCI *, dword, byte * *, byte); static void SendSSExtInd(APPL *, PLCI * plci, dword Id, byte * * parms); static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms); static void nl_ind(PLCI *); static byte connect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_a_res(dword,word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte disconnect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte disconnect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte listen_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte info_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte info_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte alert_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte facility_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte facility_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_b3_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte disconnect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte disconnect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte data_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte data_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte reset_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte 
reset_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte connect_b3_t90_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte select_b_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte manufacturer_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static byte manufacturer_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); static word get_plci(DIVA_CAPI_ADAPTER *); static void add_p(PLCI *, byte, byte *); static void add_s(PLCI * plci, byte code, API_PARSE * p); static void add_ss(PLCI * plci, byte code, API_PARSE * p); static void add_ie(PLCI * plci, byte code, byte * p, word p_length); static void add_d(PLCI *, word, byte *); static void add_ai(PLCI *, API_PARSE *); static word add_b1(PLCI *, API_PARSE *, word, word); static word add_b23(PLCI *, API_PARSE *); static word add_modem_b23 (PLCI * plci, API_PARSE* bp_parms); static void sig_req(PLCI *, byte, byte); static void nl_req_ncci(PLCI *, byte, byte); static void send_req(PLCI *); static void send_data(PLCI *); static word plci_remove_check(PLCI *); static void listen_check(DIVA_CAPI_ADAPTER *); static byte AddInfo(byte **, byte **, byte *, byte *); static byte getChannel(API_PARSE *); static void IndParse(PLCI *, word *, byte **, byte); static byte ie_compare(byte *, byte *); static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *); static word CPN_filter_ok(byte *cpn,DIVA_CAPI_ADAPTER *,word); /* XON protocol helpers */ static void channel_flow_control_remove (PLCI * plci); static void channel_x_off (PLCI * plci, byte ch, byte flag); static void channel_x_on (PLCI * plci, byte ch); static void channel_request_xon (PLCI * plci, byte ch); static void channel_xmit_xon (PLCI * plci); static int channel_can_xon (PLCI * plci, byte ch); static void channel_xmit_extended_xon (PLCI * plci); static byte SendMultiIE(PLCI * plci, dword Id, byte * * parms, byte ie_type, dword info_mask, byte 
setupParse); static word AdvCodecSupport(DIVA_CAPI_ADAPTER *, PLCI *, APPL *, byte); static void CodecIdCheck(DIVA_CAPI_ADAPTER *, PLCI *); static void SetVoiceChannel(PLCI *, byte *, DIVA_CAPI_ADAPTER * ); static void VoiceChannelOff(PLCI *plci); static void adv_voice_write_coefs (PLCI *plci, word write_command); static void adv_voice_clear_config (PLCI *plci); static word get_b1_facilities (PLCI * plci, byte b1_resource); static byte add_b1_facilities (PLCI * plci, byte b1_resource, word b1_facilities); static void adjust_b1_facilities (PLCI *plci, byte new_b1_resource, word new_b1_facilities); static word adjust_b_process (dword Id, PLCI *plci, byte Rc); static void adjust_b1_resource (dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command); static void adjust_b_restore (dword Id, PLCI *plci, byte Rc); static void reset_b3_command (dword Id, PLCI *plci, byte Rc); static void select_b_command (dword Id, PLCI *plci, byte Rc); static void fax_connect_ack_command (dword Id, PLCI *plci, byte Rc); static void fax_edata_ack_command (dword Id, PLCI *plci, byte Rc); static void fax_connect_info_command (dword Id, PLCI *plci, byte Rc); static void fax_adjust_b23_command (dword Id, PLCI *plci, byte Rc); static void fax_disconnect_command (dword Id, PLCI *plci, byte Rc); static void hold_save_command (dword Id, PLCI *plci, byte Rc); static void retrieve_restore_command (dword Id, PLCI *plci, byte Rc); static void init_b1_config (PLCI *plci); static void clear_b1_config (PLCI *plci); static void dtmf_command (dword Id, PLCI *plci, byte Rc); static byte dtmf_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg); static void dtmf_confirmation (dword Id, PLCI *plci); static void dtmf_indication (dword Id, PLCI *plci, byte *msg, word length); static void dtmf_parameter_write (PLCI *plci); static void mixer_set_bchannel_id_esc (PLCI *plci, byte bchannel_id); static void mixer_set_bchannel_id (PLCI *plci, byte 
*chi); static void mixer_clear_config (PLCI *plci); static void mixer_notify_update (PLCI *plci, byte others); static void mixer_command (dword Id, PLCI *plci, byte Rc); static byte mixer_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg); static void mixer_indication_coefs_set (dword Id, PLCI *plci); static void mixer_indication_xconnect_from (dword Id, PLCI *plci, byte *msg, word length); static void mixer_indication_xconnect_to (dword Id, PLCI *plci, byte *msg, word length); static void mixer_remove (PLCI *plci); static void ec_command (dword Id, PLCI *plci, byte Rc); static byte ec_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg); static void ec_indication (dword Id, PLCI *plci, byte *msg, word length); static void rtp_connect_b3_req_command (dword Id, PLCI *plci, byte Rc); static void rtp_connect_b3_res_command (dword Id, PLCI *plci, byte Rc); static int diva_get_dma_descriptor (PLCI *plci, dword *dma_magic); static void diva_free_dma_descriptor (PLCI *plci, int nr); /*------------------------------------------------------------------*/ /* external function prototypes */ /*------------------------------------------------------------------*/ extern byte MapController (byte); extern byte UnMapController (byte); #define MapId(Id) (((Id) & 0xffffff00L) | MapController ((byte)(Id))) #define UnMapId(Id) (((Id) & 0xffffff00L) | UnMapController ((byte)(Id))) void sendf(APPL *, word, dword, word, byte *, ...); void * TransmitBufferSet(APPL * appl, dword ref); void * TransmitBufferGet(APPL * appl, void * p); void TransmitBufferFree(APPL * appl, void * p); void * ReceiveBufferGet(APPL * appl, int Num); int fax_head_line_time (char *buffer); /*------------------------------------------------------------------*/ /* Global data definitions */ /*------------------------------------------------------------------*/ extern byte max_adapter; extern byte max_appl; extern DIVA_CAPI_ADAPTER * 
adapter; extern APPL * application; static byte remove_started = false; static PLCI dummy_plci; static struct _ftable { word command; byte * format; byte (* function)(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *); } ftable[] = { {_DATA_B3_R, "dwww", data_b3_req}, {_DATA_B3_I|RESPONSE, "w", data_b3_res}, {_INFO_R, "ss", info_req}, {_INFO_I|RESPONSE, "", info_res}, {_CONNECT_R, "wsssssssss", connect_req}, {_CONNECT_I|RESPONSE, "wsssss", connect_res}, {_CONNECT_ACTIVE_I|RESPONSE, "", connect_a_res}, {_DISCONNECT_R, "s", disconnect_req}, {_DISCONNECT_I|RESPONSE, "", disconnect_res}, {_LISTEN_R, "dddss", listen_req}, {_ALERT_R, "s", alert_req}, {_FACILITY_R, "ws", facility_req}, {_FACILITY_I|RESPONSE, "ws", facility_res}, {_CONNECT_B3_R, "s", connect_b3_req}, {_CONNECT_B3_I|RESPONSE, "ws", connect_b3_res}, {_CONNECT_B3_ACTIVE_I|RESPONSE, "", connect_b3_a_res}, {_DISCONNECT_B3_R, "s", disconnect_b3_req}, {_DISCONNECT_B3_I|RESPONSE, "", disconnect_b3_res}, {_RESET_B3_R, "s", reset_b3_req}, {_RESET_B3_I|RESPONSE, "", reset_b3_res}, {_CONNECT_B3_T90_ACTIVE_I|RESPONSE, "ws", connect_b3_t90_a_res}, {_CONNECT_B3_T90_ACTIVE_I|RESPONSE, "", connect_b3_t90_a_res}, {_SELECT_B_REQ, "s", select_b_req}, {_MANUFACTURER_R, "dws", manufacturer_req}, {_MANUFACTURER_I|RESPONSE, "dws", manufacturer_res}, {_MANUFACTURER_I|RESPONSE, "", manufacturer_res} }; static byte * cip_bc[29][2] = { { "", "" }, /* 0 */ { "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 1 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 2 */ { "\x02\x89\x90", "\x02\x89\x90" }, /* 3 */ { "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 4 */ { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 5 */ { "\x02\x98\x90", "\x02\x98\x90" }, /* 6 */ { "\x04\x88\xc0\xc6\xe6", "\x04\x88\xc0\xc6\xe6" }, /* 7 */ { "\x04\x88\x90\x21\x8f", "\x04\x88\x90\x21\x8f" }, /* 8 */ { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 9 */ { "", "" }, /* 10 */ { "", "" }, /* 11 */ { "", "" }, /* 12 */ { "", "" }, /* 13 */ { "", "" }, /* 14 */ { "", "" }, 
/* 15 */ { "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 16 */ { "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 17 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 18 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 19 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 20 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 21 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 22 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 23 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 24 */ { "\x02\x88\x90", "\x02\x88\x90" }, /* 25 */ { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 26 */ { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 27 */ { "\x02\x88\x90", "\x02\x88\x90" } /* 28 */ }; static byte * cip_hlc[29] = { "", /* 0 */ "", /* 1 */ "", /* 2 */ "", /* 3 */ "", /* 4 */ "", /* 5 */ "", /* 6 */ "", /* 7 */ "", /* 8 */ "", /* 9 */ "", /* 10 */ "", /* 11 */ "", /* 12 */ "", /* 13 */ "", /* 14 */ "", /* 15 */ "\x02\x91\x81", /* 16 */ "\x02\x91\x84", /* 17 */ "\x02\x91\xa1", /* 18 */ "\x02\x91\xa4", /* 19 */ "\x02\x91\xa8", /* 20 */ "\x02\x91\xb1", /* 21 */ "\x02\x91\xb2", /* 22 */ "\x02\x91\xb5", /* 23 */ "\x02\x91\xb8", /* 24 */ "\x02\x91\xc1", /* 25 */ "\x02\x91\x81", /* 26 */ "\x03\x91\xe0\x01", /* 27 */ "\x03\x91\xe0\x02" /* 28 */ }; /*------------------------------------------------------------------*/ #define V120_HEADER_LENGTH 1 #define V120_HEADER_EXTEND_BIT 0x80 #define V120_HEADER_BREAK_BIT 0x40 #define V120_HEADER_C1_BIT 0x04 #define V120_HEADER_C2_BIT 0x08 #define V120_HEADER_FLUSH_COND (V120_HEADER_BREAK_BIT | V120_HEADER_C1_BIT | V120_HEADER_C2_BIT) static byte v120_default_header[] = { 0x83 /* Ext, BR , res, res, C2 , C1 , B , F */ }; static byte v120_break_header[] = { 0xc3 | V120_HEADER_BREAK_BIT /* Ext, BR , res, res, C2 , C1 , B , F */ }; /*------------------------------------------------------------------*/ /* API_PUT function */ /*------------------------------------------------------------------*/ word api_put(APPL * appl, CAPI_MSG * msg) { word i, j, k, l, n; word ret; byte c; byte controller; 
DIVA_CAPI_ADAPTER * a; PLCI * plci; NCCI * ncci_ptr; word ncci; CAPI_MSG *m; API_PARSE msg_parms[MAX_MSG_PARMS+1]; if (msg->header.length < sizeof (msg->header) || msg->header.length > MAX_MSG_SIZE) { dbug(1,dprintf("bad len")); return _BAD_MSG; } controller = (byte)((msg->header.controller &0x7f)-1); /* controller starts with 0 up to (max_adapter - 1) */ if ( controller >= max_adapter ) { dbug(1,dprintf("invalid ctrl")); return _BAD_MSG; } a = &adapter[controller]; plci = NULL; if ((msg->header.plci != 0) && (msg->header.plci <= a->max_plci) && !a->adapter_disabled) { dbug(1,dprintf("plci=%x",msg->header.plci)); plci = &a->plci[msg->header.plci-1]; ncci = GET_WORD(&msg->header.ncci); if (plci->Id && (plci->appl || (plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT) || (msg->header.command == (_DISCONNECT_I|RESPONSE))) && ((ncci == 0) || (msg->header.command == (_DISCONNECT_B3_I|RESPONSE)) || ((ncci < MAX_NCCI+1) && (a->ncci_plci[ncci] == plci->Id)))) { i = plci->msg_in_read_pos; j = plci->msg_in_write_pos; if (j >= i) { if (j + msg->header.length + MSG_IN_OVERHEAD <= MSG_IN_QUEUE_SIZE) i += MSG_IN_QUEUE_SIZE - j; else j = 0; } else { n = (((CAPI_MSG *)(plci->msg_in_queue))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc; if (i > MSG_IN_QUEUE_SIZE - n) i = MSG_IN_QUEUE_SIZE - n + 1; i -= j; } if (i <= ((msg->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc)) { dbug(0,dprintf("Q-FULL1(msg) - len=%d write=%d read=%d wrap=%d free=%d", msg->header.length, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos, i)); return _QUEUE_FULL; } c = false; if ((((byte *) msg) < ((byte *)(plci->msg_in_queue))) || (((byte *) msg) >= ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue))) { if (plci->msg_in_write_pos != plci->msg_in_read_pos) c = true; } if (msg->header.command == _DATA_B3_R) { if (msg->header.length < 20) { dbug(1,dprintf("DATA_B3 REQ wrong length %d", msg->header.length)); return _BAD_MSG; } ncci_ptr = &(a->ncci[ncci]); n = 
ncci_ptr->data_pending; l = ncci_ptr->data_ack_pending; k = plci->msg_in_read_pos; while (k != plci->msg_in_write_pos) { if (k == plci->msg_in_wrap_pos) k = 0; if ((((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.command == _DATA_B3_R) && (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.ncci == ncci)) { n++; if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->info.data_b3_req.Flags & 0x0004) l++; } k += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc; } if ((n >= MAX_DATA_B3) || (l >= MAX_DATA_ACK)) { dbug(0,dprintf("Q-FULL2(data) - pending=%d/%d ack_pending=%d/%d", ncci_ptr->data_pending, n, ncci_ptr->data_ack_pending, l)); return _QUEUE_FULL; } if (plci->req_in || plci->internal_command) { if ((((byte *) msg) >= ((byte *)(plci->msg_in_queue))) && (((byte *) msg) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue))) { dbug(0,dprintf("Q-FULL3(requeue)")); return _QUEUE_FULL; } c = true; } } else { if (plci->req_in || plci->internal_command) c = true; else { plci->command = msg->header.command; plci->number = msg->header.number; } } if (c) { dbug(1,dprintf("enqueue msg(0x%04x,0x%x,0x%x) - len=%d write=%d read=%d wrap=%d free=%d", msg->header.command, plci->req_in, plci->internal_command, msg->header.length, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos, i)); if (j == 0) plci->msg_in_wrap_pos = plci->msg_in_write_pos; m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]); for (i = 0; i < msg->header.length; i++) ((byte *)(plci->msg_in_queue))[j++] = ((byte *) msg)[i]; if (m->header.command == _DATA_B3_R) { m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet (appl, m->info.data_b3_req.Data)); } j = (j + 3) & 0xfffc; *((APPL * *)(&((byte *)(plci->msg_in_queue))[j])) = appl; plci->msg_in_write_pos = j + MSG_IN_OVERHEAD; return 0; } } else { plci = NULL; } } dbug(1,dprintf("com=%x",msg->header.command)); for(j=0;j<MAX_MSG_PARMS+1;j++) msg_parms[j].length = 
0; for(i=0, ret = _BAD_MSG; i < ARRAY_SIZE(ftable); i++) { if(ftable[i].command==msg->header.command) { /* break loop if the message is correct, otherwise continue scan */ /* (for example: CONNECT_B3_T90_ACT_RES has two specifications) */ if(!api_parse(msg->info.b,(word)(msg->header.length-12),ftable[i].format,msg_parms)) { ret = 0; break; } for(j=0;j<MAX_MSG_PARMS+1;j++) msg_parms[j].length = 0; } } if(ret) { dbug(1,dprintf("BAD_MSG")); if(plci) plci->command = 0; return ret; } c = ftable[i].function(GET_DWORD(&msg->header.controller), msg->header.number, a, plci, appl, msg_parms); channel_xmit_extended_xon (plci); if(c==1) send_req(plci); if(c==2 && plci) plci->req_in = plci->req_in_start = plci->req_out = 0; if(plci && !plci->req_in) plci->command = 0; return 0; } /*------------------------------------------------------------------*/ /* api_parse function, check the format of api messages */ /*------------------------------------------------------------------*/ static word api_parse(byte *msg, word length, byte *format, API_PARSE *parms) { word i; word p; for(i=0,p=0; format[i]; i++) { if(parms) { parms[i].info = &msg[p]; } switch(format[i]) { case 'b': p +=1; break; case 'w': p +=2; break; case 'd': p +=4; break; case 's': if(msg[p]==0xff) { parms[i].info +=2; parms[i].length = msg[p+1] + (msg[p+2]<<8); p +=(parms[i].length +3); } else { parms[i].length = msg[p]; p +=(parms[i].length +1); } break; } if(p>length) return true; } if(parms) parms[i].info = NULL; return false; } static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out) { word i, j, n = 0; byte *p; p = out->info; for (i = 0; format[i] != '\0'; i++) { out->parms[i].info = p; out->parms[i].length = in[i].length; switch (format[i]) { case 'b': n = 1; break; case 'w': n = 2; break; case 'd': n = 4; break; case 's': n = in[i].length + 1; break; } for (j = 0; j < n; j++) *(p++) = in[i].info[j]; } out->parms[i].info = NULL; out->parms[i].length = 0; } static void api_load_msg(API_SAVE *in, 
API_PARSE *out)
{
  word i;

  i = 0;
  do
  {
    out[i].info = in->parms[i].info;
    out[i].length = in->parms[i].length;
  } while (in->parms[i++].info);
}


/*------------------------------------------------------------------*/
/* CAPI remove function                                             */
/*------------------------------------------------------------------*/
/*
 * First call: starts removal of every active PLCI on all adapters and
 * returns 1.  Subsequent calls: returns 1 while any signalling id is
 * still alive, otherwise calls api_remove_complete() and returns 0.
 */
word api_remove_start(void)
{
  word i;
  word j;

  if(!remove_started) {
    remove_started = true;
    for(i=0;i<max_adapter;i++) {
      if(adapter[i].request) {
        for(j=0;j<adapter[i].max_plci;j++) {
          if(adapter[i].plci[j].Sig.Id) plci_remove(&adapter[i].plci[j]);
        }
      }
    }
    return 1;
  }
  else {
    for(i=0;i<max_adapter;i++) {
      if(adapter[i].request) {
        for(j=0;j<adapter[i].max_plci;j++) {
          if(adapter[i].plci[j].Sig.Id) return 1;
        }
      }
    }
  }
  api_remove_complete();
  return 0;
}


/*------------------------------------------------------------------*/
/* internal command queue                                           */
/*------------------------------------------------------------------*/

/* Resets the per-PLCI queue of internal command callbacks. */
static void init_internal_command_queue (PLCI *plci)
{
  word i;

  dbug (1, dprintf ("%s,%d: init_internal_command_queue", (char *)(FILE_), __LINE__));

  plci->internal_command = 0;
  for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS; i++)
    plci->internal_command_queue[i] = NULL;
}

/* Runs 'command_function' immediately when no internal command is in
 * progress, otherwise appends it to the queue for later execution by
 * next_internal_command(). */
static void start_internal_command (dword Id, PLCI *plci, t_std_internal_command command_function)
{
  word i;

  dbug (1, dprintf ("[%06lx] %s,%d: start_internal_command", UnMapId (Id), (char *)(FILE_), __LINE__));

  if (plci->internal_command == 0)
  {
    plci->internal_command_queue[0] = command_function;
    (* command_function)(Id, plci, OK);
  }
  else
  {
    i = 1;
    while (plci->internal_command_queue[i] != NULL)
      i++;
    plci->internal_command_queue[i] = command_function;
  }
}

/* Drops the finished head entry and runs queued callbacks until one
 * of them leaves plci->internal_command non-zero (still pending). */
static void next_internal_command (dword Id, PLCI *plci)
{
  word i;

  dbug (1, dprintf ("[%06lx] %s,%d: next_internal_command", UnMapId (Id), (char *)(FILE_), __LINE__));

  plci->internal_command = 0;
  plci->internal_command_queue[0] = NULL;
  while (plci->internal_command_queue[1] != NULL)
  {
    for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
      plci->internal_command_queue[i] = plci->internal_command_queue[i+1];
    plci->internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS - 1] = NULL;
    (*(plci->internal_command_queue[0]))(Id, plci, OK);
    if (plci->internal_command != 0)
      return;
    plci->internal_command_queue[0] = NULL;
  }
}


/*------------------------------------------------------------------*/
/* NCCI allocate/remove function                                    */
/*------------------------------------------------------------------*/

/* counts inconsistencies detected in the channel<->NCCI mapping */
static dword ncci_mapping_bug = 0;

/*
 * Allocates an NCCI for network channel 'ch' of 'plci' (or reuses
 * 'force_ncci' when non-zero), maintains the adapter's ncci_ch /
 * ch_ncci tables and the PLCI's NCCI ring list.  Returns the NCCI.
 */
static word get_ncci (PLCI *plci, byte ch, word force_ncci)
{
  DIVA_CAPI_ADAPTER *a;
  word ncci, i, j, k;

  a = plci->adapter;
  if (!ch || a->ch_ncci[ch])
  {
    /* channel 0 or channel already mapped: log and fall back to ch */
    ncci_mapping_bug++;
    dbug(1,dprintf("NCCI mapping exists %ld %02x %02x %02x-%02x",
      ncci_mapping_bug, ch, force_ncci, a->ncci_ch[a->ch_ncci[ch]], a->ch_ncci[ch]));
    ncci = ch;
  }
  else
  {
    if (force_ncci)
      ncci = force_ncci;
    else
    {
      /* prefer NCCI == channel number, otherwise first free slot */
      if ((ch < MAX_NCCI+1) && !a->ncci_ch[ch])
        ncci = ch;
      else
      {
        ncci = 1;
        while ((ncci < MAX_NCCI+1) && a->ncci_ch[ncci])
          ncci++;
        if (ncci == MAX_NCCI+1)
        {
          /* table full: gather diagnostic data for the log */
          ncci_mapping_bug++;
          i = 1;
          do
          {
            j = 1;
            while ((j < MAX_NCCI+1) && (a->ncci_ch[j] != i))
              j++;
            k = j;
            if (j < MAX_NCCI+1)
            {
              do
              {
                j++;
              } while ((j < MAX_NCCI+1) && (a->ncci_ch[j] != i));
            }
          } while ((i < MAX_NL_CHANNEL+1) && (j < MAX_NCCI+1));
          if (i < MAX_NL_CHANNEL+1)
          {
            dbug(1,dprintf("NCCI mapping overflow %ld %02x %02x %02x-%02x-%02x",
              ncci_mapping_bug, ch, force_ncci, i, k, j));
          }
          else
          {
            dbug(1,dprintf("NCCI mapping overflow %ld %02x %02x",
              ncci_mapping_bug, ch, force_ncci));
          }
          ncci = ch;
        }
      }
      a->ncci_plci[ncci] = plci->Id;
      a->ncci_state[ncci] = IDLE;
      /* insert the new NCCI into the PLCI's ring list */
      if (!plci->ncci_ring_list)
        plci->ncci_ring_list = ncci;
      else
        a->ncci_next[ncci] = a->ncci_next[plci->ncci_ring_list];
      a->ncci_next[plci->ncci_ring_list] = (byte) ncci;
    }
    a->ncci_ch[ncci] = ch;
    a->ch_ncci[ch] = (byte) ncci;
    dbug(1,dprintf("NCCI mapping established %ld %02x %02x %02x-%02x",
      ncci_mapping_bug, ch, force_ncci, ch, ncci));
  }
  return (ncci);
}

/* Clears the application's DataNCCI bookkeeping entries for one NCCI
 * (or for all NCCIs of the PLCI when ncci == 0); continues below. */
static void ncci_free_receive_buffers (PLCI *plci, word ncci)
{
DIVA_CAPI_ADAPTER *a;
  APPL *appl;
  word i, ncci_code;
  dword Id;

  a = plci->adapter;
  Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
  if (ncci)
  {
    /* single NCCI: drop the application's data handles for it */
    if (a->ncci_plci[ncci] == plci->Id)
    {
      if (!plci->appl)
      {
        ncci_mapping_bug++;
        dbug(1,dprintf("NCCI mapping appl expected %ld %08lx",
          ncci_mapping_bug, Id));
      }
      else
      {
        appl = plci->appl;
        ncci_code = ncci | (((word) a->Id) << 8);
        for (i = 0; i < appl->MaxBuffer; i++)
        {
          if ((appl->DataNCCI[i] == ncci_code)
           && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
          {
            appl->DataNCCI[i] = 0;
          }
        }
      }
    }
  }
  else
  {
    /* ncci == 0: clean up every NCCI belonging to this PLCI */
    for (ncci = 1; ncci < MAX_NCCI+1; ncci++)
    {
      if (a->ncci_plci[ncci] == plci->Id)
      {
        if (!plci->appl)
        {
          ncci_mapping_bug++;
          dbug(1,dprintf("NCCI mapping no appl %ld %08lx",
            ncci_mapping_bug, Id));
        }
        else
        {
          appl = plci->appl;
          ncci_code = ncci | (((word) a->Id) << 8);
          for (i = 0; i < appl->MaxBuffer; i++)
          {
            if ((appl->DataNCCI[i] == ncci_code)
             && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
            {
              appl->DataNCCI[i] = 0;
            }
          }
        }
      }
    }
  }
}

/*
 * Returns all still-pending transmit buffers of an NCCI to the
 * application (skipping the buffer currently being sent by the
 * hardware) and resets its data bookkeeping counters.
 */
static void cleanup_ncci_data (PLCI *plci, word ncci)
{
  NCCI *ncci_ptr;

  if (ncci && (plci->adapter->ncci_plci[ncci] == plci->Id))
  {
    ncci_ptr = &(plci->adapter->ncci[ncci]);
    if (plci->appl)
    {
      while (ncci_ptr->data_pending != 0)
      {
        if (!plci->data_sent || (ncci_ptr->DBuffer[ncci_ptr->data_out].P != plci->data_sent_ptr))
          TransmitBufferFree (plci->appl, ncci_ptr->DBuffer[ncci_ptr->data_out].P);
        (ncci_ptr->data_out)++;
        if (ncci_ptr->data_out == MAX_DATA_B3)
          ncci_ptr->data_out = 0;
        (ncci_ptr->data_pending)--;
      }
    }
    ncci_ptr->data_out = 0;
    ncci_ptr->data_pending = 0;
    ncci_ptr->data_ack_out = 0;
    ncci_ptr->data_ack_pending = 0;
  }
}

/*
 * Tears down the mapping for one NCCI (or all NCCIs of the PLCI when
 * ncci == 0).  With preserve_ncci set only the channel association is
 * cleared and the NCCI itself stays allocated.
 */
static void ncci_remove (PLCI *plci, word ncci, byte preserve_ncci)
{
  DIVA_CAPI_ADAPTER *a;
  dword Id;
  word i;

  a = plci->adapter;
  Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
  if (!preserve_ncci)
    ncci_free_receive_buffers (plci, ncci);
  if (ncci)
  {
    if (a->ncci_plci[ncci] != plci->Id)
    {
      ncci_mapping_bug++;
      dbug(1,dprintf("NCCI mapping doesn't exist %ld %08lx %02x",
        ncci_mapping_bug, Id, preserve_ncci));
    }
    else
    {
      cleanup_ncci_data (plci, ncci);
      dbug(1,dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
        ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
      a->ch_ncci[a->ncci_ch[ncci]] = 0;
      if (!preserve_ncci)
      {
        a->ncci_ch[ncci] = 0;
        a->ncci_plci[ncci] = 0;
        a->ncci_state[ncci] = IDLE;
        /* unlink the NCCI from the PLCI's ring list */
        i = plci->ncci_ring_list;
        while ((i != 0) && (a->ncci_next[i] != plci->ncci_ring_list) && (a->ncci_next[i] != ncci))
          i = a->ncci_next[i];
        if ((i != 0) && (a->ncci_next[i] == ncci))
        {
          if (i == ncci)
            plci->ncci_ring_list = 0;
          else if (plci->ncci_ring_list == ncci)
            plci->ncci_ring_list = i;
          a->ncci_next[i] = a->ncci_next[ncci];
        }
        a->ncci_next[ncci] = 0;
      }
    }
  }
  else
  {
    /* ncci == 0: release every NCCI owned by this PLCI */
    for (ncci = 1; ncci < MAX_NCCI+1; ncci++)
    {
      if (a->ncci_plci[ncci] == plci->Id)
      {
        cleanup_ncci_data (plci, ncci);
        dbug(1,dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
          ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
        a->ch_ncci[a->ncci_ch[ncci]] = 0;
        if (!preserve_ncci)
        {
          a->ncci_ch[ncci] = 0;
          a->ncci_plci[ncci] = 0;
          a->ncci_state[ncci] = IDLE;
          a->ncci_next[ncci] = 0;
        }
      }
    }
    if (!preserve_ncci)
      plci->ncci_ring_list = 0;
  }
}


/*------------------------------------------------------------------*/
/* PLCI remove function                                             */
/*------------------------------------------------------------------*/

/* Gives any DATA_B3 transmit buffers still sitting in the PLCI's
 * message queue back to the application and resets the queue. */
static void plci_free_msg_in_queue (PLCI *plci)
{
  word i;

  if (plci->appl)
  {
    i = plci->msg_in_read_pos;
    while (i != plci->msg_in_write_pos)
    {
      if (i == plci->msg_in_wrap_pos)
        i = 0;
      if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.command == _DATA_B3_R)
      {
        TransmitBufferFree (plci->appl,
          (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
      }
      i += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.length +
        MSG_IN_OVERHEAD + 3) & 0xfffc;
    }
  }
  plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
  plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
  plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
}

/* Starts tearing down a PLCI: sends HANGUP (signalling) or REMOVE
 * (D-channel X.25 network layer), releases NCCIs and queued messages
 * (body continues on the next line). */
static void plci_remove(PLCI * plci)
{
if(!plci) {
    dbug(1,dprintf("plci_remove(no plci)"));
    return;
  }
  init_internal_command_queue (plci);
  dbug(1,dprintf("plci_remove(%x,tel=%x)",plci->Id,plci->tel));
  if(plci_remove_check(plci))
  {
    return;
  }
  if (plci->Sig.Id == 0xff)
  {
    /* D-channel X.25: remove the network layer entity directly */
    dbug(1,dprintf("D-channel X.25 plci->NL.Id:%0x", plci->NL.Id));
    if (plci->NL.Id && !plci->nl_remove_id)
    {
      nl_req_ncci(plci,REMOVE,0);
      send_req(plci);
    }
  }
  else
  {
    /* normal call: hang up via the signalling entity if anything is
     * still active or queued */
    if (!plci->sig_remove_id
     && (plci->Sig.Id
      || (plci->req_in!=plci->req_out)
      || (plci->nl_req || plci->sig_req)))
    {
      sig_req(plci,HANGUP,0);
      send_req(plci);
    }
  }
  ncci_remove (plci, 0, false);
  plci_free_msg_in_queue (plci);

  plci->channels = 0;
  plci->appl = NULL;
  if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT))
    plci->State = OUTG_DIS_PENDING;
}

/*------------------------------------------------------------------*/
/* Application Group function helpers                               */
/*------------------------------------------------------------------*/

/* Marks every application as a member of the group (all bits set). */
static void set_group_ind_mask (PLCI *plci)
{
  word i;

  for (i = 0; i < C_IND_MASK_DWORDS; i++)
    plci->group_optimization_mask_table[i] = 0xffffffffL;
}

/* Removes application 'b' from the group optimization mask. */
static void clear_group_ind_mask_bit (PLCI *plci, word b)
{
  plci->group_optimization_mask_table[b >> 5] &= ~(1L << (b & 0x1f));
}

/* Tests whether application 'b' is still in the group mask. */
static byte test_group_ind_mask_bit (PLCI *plci, word b)
{
  return ((plci->group_optimization_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0);
}

/*------------------------------------------------------------------*/
/* c_ind_mask operations for arbitrary MAX_APPL                     */
/*------------------------------------------------------------------*/
/* c_ind_mask records which applications were sent a CONNECT_IND and
 * have not yet answered it. */

/* Clears the whole CONNECT_IND pending mask. */
static void clear_c_ind_mask (PLCI *plci)
{
  word i;

  for (i = 0; i < C_IND_MASK_DWORDS; i++)
    plci->c_ind_mask_table[i] = 0;
}

/* Returns true when no application has a CONNECT_IND outstanding. */
static byte c_ind_mask_empty (PLCI *plci)
{
  word i;

  i = 0;
  while ((i < C_IND_MASK_DWORDS) && (plci->c_ind_mask_table[i] == 0))
    i++;
  return (i == C_IND_MASK_DWORDS);
}

/* Records that application 'b' was sent a CONNECT_IND. */
static void set_c_ind_mask_bit (PLCI *plci, word b)
{
  plci->c_ind_mask_table[b >> 5] |= (1L << (b & 0x1f));
}

/* Clears application 'b' from the CONNECT_IND pending mask. */
static void clear_c_ind_mask_bit (PLCI *plci, word b)
{
  plci->c_ind_mask_table[b >> 5] &= ~(1L << (b & 0x1f));
}

/* Tests whether application 'b' still has a CONNECT_IND pending. */
static byte test_c_ind_mask_bit (PLCI *plci, word b)
{
  return ((plci->c_ind_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0);
}

/* Dumps the c_ind_mask table to the debug log as hex words. */
static void dump_c_ind_mask (PLCI *plci)
{
static char hex_digit_table[0x10] =
  {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
  word i, j, k;
  dword d;
  char *p;
  char buf[40];

  for (i = 0; i < C_IND_MASK_DWORDS; i += 4)
  {
    p = buf + 36;
    *p = '\0';
    for (j = 0; j < 4; j++)
    {
      if (i+j < C_IND_MASK_DWORDS)
      {
        d = plci->c_ind_mask_table[i+j];
        for (k = 0; k < 8; k++)
        {
          *(--p) = hex_digit_table[d & 0xf];
          d >>= 4;
        }
      }
      else if (i != 0)
      {
        for (k = 0; k < 8; k++)
          *(--p) = ' ';
      }
      *(--p) = ' ';
    }
    dbug(1,dprintf ("c_ind_mask =%s", (char *) p));
  }
}


#define dump_plcis(a)


/*------------------------------------------------------------------*/
/* translation function for each message                            */
/*------------------------------------------------------------------*/

/*
 * CONNECT_REQ handler: allocates a PLCI, parses the additional-info
 * and B-protocol parameters, builds the signalling IEs (BC/HLC/CHI...)
 * and issues ASSIGN/CALL_REQ to the card.  Body continues on the
 * following lines.
 */
static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word ch;
  word i;
  word Info;
  word CIP;
  byte LinkLayer;
  API_PARSE * ai;
  API_PARSE * bp;
  API_PARSE ai_parms[5];
  word channel = 0;
  dword ch_mask;
  byte m;
  static byte esc_chi[35] = {0x02,0x18,0x01};
  static byte lli[2] = {0x01,0x00};
  byte noCh = 0;
  word dir = 0;
  byte *p_chi = "";

  for(i=0;i<5;i++) ai_parms[i].length = 0;

  dbug(1,dprintf("connect_req(%d)",parms->length));
  Info = _WRONG_IDENTIFIER;
  if(a)
  {
    if(a->adapter_disabled)
    {
      dbug(1,dprintf("adapter disabled"));
      Id = ((word)1<<8)|a->Id;
      sendf(appl,_CONNECT_R|CONFIRM,Id,Number,"w",0);
      sendf(appl, _DISCONNECT_I, Id, 0, "w", _L1_ERROR);
      return false;
    }
    Info = _OUT_OF_PLCI;
    if((i=get_plci(a)))
    {
      Info = 0;
      plci = &a->plci[i-1];
      plci->appl = appl;
      plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
      /* check 'external controller' bit for codec support */
      if(Id & EXT_CONTROLLER)
      {
        if(AdvCodecSupport(a, plci, appl, 0) )
        {
          plci->Id = 0;
          sendf(appl, _CONNECT_R|CONFIRM, Id,
Number, "w", _WRONG_IDENTIFIER); return 2; } } ai = &parms[9]; bp = &parms[5]; ch = 0; if(bp->length)LinkLayer = bp->info[3]; else LinkLayer = 0; if(ai->length) { ch=0xffff; if(!api_parse(&ai->info[1],(word)ai->length,"ssss",ai_parms)) { ch = 0; if(ai_parms[0].length) { ch = GET_WORD(ai_parms[0].info+1); if(ch>4) ch=0; /* safety -> ignore ChannelID */ if(ch==4) /* explizit CHI in message */ { /* check length of B-CH struct */ if((ai_parms[0].info)[3]>=1) { if((ai_parms[0].info)[4]==CHI) { p_chi = &((ai_parms[0].info)[5]); } else { p_chi = &((ai_parms[0].info)[3]); } if(p_chi[0]>35) /* check length of channel ID */ { Info = _WRONG_MESSAGE_FORMAT; } } else Info = _WRONG_MESSAGE_FORMAT; } if(ch==3 && ai_parms[0].length>=7 && ai_parms[0].length<=36) { dir = GET_WORD(ai_parms[0].info+3); ch_mask = 0; m = 0x3f; for(i=0; i+5<=ai_parms[0].length; i++) { if(ai_parms[0].info[i+5]!=0) { if((ai_parms[0].info[i+5] | m) != 0xff) Info = _WRONG_MESSAGE_FORMAT; else { if (ch_mask == 0) channel = i; ch_mask |= 1L << i; } } m = 0; } if (ch_mask == 0) Info = _WRONG_MESSAGE_FORMAT; if (!Info) { if ((ai_parms[0].length == 36) || (ch_mask != ((dword)(1L << channel)))) { esc_chi[0] = (byte)(ai_parms[0].length - 2); for(i=0; i+5<=ai_parms[0].length; i++) esc_chi[i+3] = ai_parms[0].info[i+5]; } else esc_chi[0] = 2; esc_chi[2] = (byte)channel; plci->b_channel = (byte)channel; /* not correct for ETSI ch 17..31 */ add_p(plci,LLI,lli); add_p(plci,ESC,esc_chi); plci->State = LOCAL_CONNECT; if(!dir) plci->call_dir |= CALL_DIR_FORCE_OUTG_NL; /* dir 0=DTE, 1=DCE */ } } } } else Info = _WRONG_MESSAGE_FORMAT; } dbug(1,dprintf("ch=%x,dir=%x,p_ch=%d",ch,dir,channel)); plci->command = _CONNECT_R; plci->number = Number; /* x.31 or D-ch free SAPI in LinkLayer? */ if(ch==1 && LinkLayer!=3 && LinkLayer!=12) noCh = true; if((ch==0 || ch==2 || noCh || ch==3 || ch==4) && !Info) { /* B-channel used for B3 connections (ch==0), or no B channel */ /* is used (ch==2) or perm. 
connection (3) is used do a CALL */ if(noCh) Info = add_b1(plci,&parms[5],2,0); /* no resource */ else Info = add_b1(plci,&parms[5],ch,0); add_s(plci,OAD,&parms[2]); add_s(plci,OSA,&parms[4]); add_s(plci,BC,&parms[6]); add_s(plci,LLC,&parms[7]); add_s(plci,HLC,&parms[8]); CIP = GET_WORD(parms[0].info); if (a->Info_Mask[appl->Id-1] & 0x200) { /* early B3 connect (CIP mask bit 9) no release after a disc */ add_p(plci,LLI,"\x01\x01"); } if(GET_WORD(parms[0].info)<29) { add_p(plci,BC,cip_bc[GET_WORD(parms[0].info)][a->u_law]); add_p(plci,HLC,cip_hlc[GET_WORD(parms[0].info)]); } add_p(plci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(plci,ASSIGN,DSIG_ID); } else if(ch==1) { /* D-Channel used for B3 connections */ plci->Sig.Id = 0xff; Info = 0; } if(!Info && ch!=2 && !noCh ) { Info = add_b23(plci,&parms[5]); if(!Info) { if(!(plci->tel && !plci->adv_nl))nl_req_ncci(plci,ASSIGN,0); } } if(!Info) { if(ch==0 || ch==2 || ch==3 || noCh || ch==4) { if(plci->spoofed_msg==SPOOFING_REQUIRED) { api_save_msg(parms, "wsssssssss", &plci->saved_msg); plci->spoofed_msg = CALL_REQ; plci->internal_command = BLOCK_PLCI; plci->command = 0; dbug(1,dprintf("Spoof")); send_req(plci); return false; } if(ch==4)add_p(plci,CHI,p_chi); add_s(plci,CPN,&parms[1]); add_s(plci,DSA,&parms[3]); if(noCh) add_p(plci,ESC,"\x02\x18\xfd"); /* D-channel, no B-L3 */ add_ai(plci,&parms[9]); if(!dir)sig_req(plci,CALL_REQ,0); else { plci->command = PERM_LIST_REQ; plci->appl = appl; sig_req(plci,LISTEN_REQ,0); send_req(plci); return false; } } send_req(plci); return false; } plci->Id = 0; } } sendf(appl, _CONNECT_R|CONFIRM, Id, Number, "w",Info); return 2; } static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms) { word i, Info; word Reject; static byte cau_t[] = {0,0,0x90,0x91,0xac,0x9d,0x86,0xd8,0x9b}; static byte esc_t[] = {0x03,0x08,0x00,0x00}; API_PARSE * ai; API_PARSE ai_parms[5]; word ch=0; if(!plci) { dbug(1,dprintf("connect_res(no plci)")); return 0; 
/* no plci, no send */
  }

  dbug(1,dprintf("connect_res(State=0x%x)",plci->State));
  for(i=0;i<5;i++) ai_parms[i].length = 0;
  ai = &parms[5];
  dbug(1,dprintf("ai->length=%d",ai->length));

  /* extract the B-channel information from the additional info */
  if(ai->length)
  {
    if(!api_parse(&ai->info[1],(word)ai->length,"ssss",ai_parms))
    {
      dbug(1,dprintf("ai_parms[0].length=%d/0x%x",ai_parms[0].length,GET_WORD(ai_parms[0].info+1)));
      ch = 0;
      if(ai_parms[0].length)
      {
        ch = GET_WORD(ai_parms[0].info+1);
        dbug(1,dprintf("BCH-I=0x%x",ch));
      }
    }
  }

  if(plci->State==INC_CON_CONNECTED_ALERT)
  {
    dbug(1,dprintf("Connected Alert Call_Res"));
    if (a->Info_Mask[appl->Id-1] & 0x200)
    {
      /* early B3 connect (CIP mask bit 9) no release after a disc */
      add_p(plci,LLI,"\x01\x01");
    }
    add_s(plci, CONN_NR, &parms[2]);
    add_s(plci, LLC, &parms[4]);
    add_ai(plci, &parms[5]);
    plci->State = INC_CON_ACCEPT;
    sig_req(plci, CALL_RES,0);
    return 1;
  }
  else if(plci->State==INC_CON_PENDING || plci->State==INC_CON_ALERT) {
    /* this application has now answered its CONNECT_IND */
    clear_c_ind_mask_bit (plci, (word)(appl->Id-1));
    dump_c_ind_mask (plci);
    Reject = GET_WORD(parms[0].info);
    dbug(1,dprintf("Reject=0x%x",Reject));
    if(Reject)
    {
      /* reject/hangup only when no other application may still take
       * the call */
      if(c_ind_mask_empty (plci))
      {
        if((Reject&0xff00)==0x3400)
        {
          esc_t[2] = ((byte)(Reject&0x00ff)) | 0x80;
          add_p(plci,ESC,esc_t);
          add_ai(plci, &parms[5]);
          sig_req(plci,REJECT,0);
        }
        else if(Reject==1 || Reject>9)
        {
          add_ai(plci, &parms[5]);
          sig_req(plci,HANGUP,0);
        }
        else
        {
          /* map the CAPI reject value to a cause via cau_t[] */
          esc_t[2] = cau_t[(Reject&0x000f)];
          add_p(plci,ESC,esc_t);
          add_ai(plci, &parms[5]);
          sig_req(plci,REJECT,0);
        }
        plci->appl = appl;
      }
      else
      {
        sendf(appl, _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
      }
    }
    else {
      /* call accepted */
      plci->appl = appl;
      if(Id & EXT_CONTROLLER){
        if(AdvCodecSupport(a, plci, appl, 0)){
          dbug(1,dprintf("connect_res(error from AdvCodecSupport)"));
          sig_req(plci,HANGUP,0);
          return 1;
        }
        if(plci->tel == ADV_VOICE && a->AdvCodecPLCI)
        {
          Info = add_b23(plci, &parms[1]);
          if (Info)
          {
            dbug(1,dprintf("connect_res(error from add_b23)"));
            sig_req(plci,HANGUP,0);
            return 1;
          }
          if(plci->adv_nl)
          {
            nl_req_ncci(plci, ASSIGN, 0);
          }
        }
      }
      else
      {
        plci->tel = 0;
        if(ch!=2)
        {
          Info = add_b23(plci, &parms[1]);
          if (Info)
          {
            dbug(1,dprintf("connect_res(error from add_b23 2)"));
            sig_req(plci,HANGUP,0);
            return 1;
          }
        }
        nl_req_ncci(plci, ASSIGN, 0);
      }

      if(plci->spoofed_msg==SPOOFING_REQUIRED)
      {
        /* card not ready: save the message for later replay */
        api_save_msg(parms, "wsssss", &plci->saved_msg);
        plci->spoofed_msg = CALL_RES;
        plci->internal_command = BLOCK_PLCI;
        plci->command = 0;
        dbug(1,dprintf("Spoof"));
      }
      else
      {
        add_b1 (plci, &parms[1], ch, plci->B1_facilities);
        if (a->Info_Mask[appl->Id-1] & 0x200)
        {
          /* early B3 connect (CIP mask bit 9) no release after a disc */
          add_p(plci,LLI,"\x01\x01");
        }
        add_s(plci, CONN_NR, &parms[2]);
        add_s(plci, LLC, &parms[4]);
        add_ai(plci, &parms[5]);
        plci->State = INC_CON_ACCEPT;
        sig_req(plci, CALL_RES,0);
      }

      /* every other application still holding a CONNECT_IND loses */
      for(i=0; i<max_appl; i++) {
        if(test_c_ind_mask_bit (plci, i)) {
          sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
        }
      }
    }
  }
  return 1;
}

/* CONNECT_ACTIVE_RES handler: nothing to do, just logs. */
static byte connect_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  dbug(1,dprintf("connect_a_res"));
  return false;
}

/*
 * DISCONNECT_REQ handler: notifies other applications, then sends
 * HANGUP on the signalling entity (or REMOVE for D-channel X.25) and
 * confirms to the requester.  Body continues on the next line.
 */
static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  word Info;
  word i;

  dbug(1,dprintf("disconnect_req"));
  Info = _WRONG_IDENTIFIER;
  if(plci)
  {
    if(plci->State==INC_CON_PENDING || plci->State==INC_CON_ALERT)
    {
      clear_c_ind_mask_bit (plci, (word)(appl->Id-1));
      plci->appl = appl;
      /* tell all other applications the call is gone */
      for(i=0; i<max_appl; i++)
      {
        if(test_c_ind_mask_bit (plci, i))
          sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
      }
      plci->State = OUTG_DIS_PENDING;
    }
    if(plci->Sig.Id && plci->appl)
    {
      Info = 0;
      if(plci->Sig.Id!=0xff)
      {
        if(plci->State!=INC_DIS_PENDING)
        {
          add_ai(plci, &msg[0]);
          sig_req(plci,HANGUP,0);
          plci->State = OUTG_DIS_PENDING;
          return 1;
        }
      }
      else
      {
        /* D-channel X.25: remove the network layer entity */
        if (plci->NL.Id && !plci->nl_remove_id)
        {
          mixer_remove (plci);
          nl_req_ncci(plci,REMOVE,0);
          sendf(appl,_DISCONNECT_R|CONFIRM,Id,Number,"w",0);
          sendf(appl, _DISCONNECT_I, Id, 0, "w", 0);
          plci->State = INC_DIS_PENDING;
        }
        return 1;
      }
    }
  }

  if(!appl)  return false;
  sendf(appl,
_DISCONNECT_R|CONFIRM, Id, Number, "w",Info);
  return false;
}

/* DISCONNECT_RES handler: releases receive buffers and removes the
 * PLCI once all of its channels are gone. */
static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  dbug(1,dprintf("disconnect_res"));
  if(plci)
  {
    /* clear ind mask bit, just in case of collsion of          */
    /* DISCONNECT_IND and CONNECT_RES                           */
    clear_c_ind_mask_bit (plci, (word)(appl->Id-1));
    ncci_free_receive_buffers (plci, 0);
    if(plci_remove_check(plci))
    {
      return 0;
    }
    if(plci->State==INC_DIS_PENDING
    || plci->State==SUSPENDING) {
      if(c_ind_mask_empty (plci)) {
        if(plci->State!=SUSPENDING)plci->State = IDLE;
        dbug(1,dprintf("chs=%d",plci->channels));
        if(!plci->channels) {
          plci_remove(plci);
        }
      }
    }
  }
  return 0;
}

/* LISTEN_REQ handler: stores the application's Info/CIP masks and
 * switches codec listen on or off for the external controller. */
static byte listen_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word Info;
  byte i;

  dbug(1,dprintf("listen_req(Appl=0x%x)",appl->Id));

  Info = _WRONG_IDENTIFIER;
  if(a) {
    Info = 0;
    a->Info_Mask[appl->Id-1] = GET_DWORD(parms[0].info);
    a->CIP_Mask[appl->Id-1] = GET_DWORD(parms[1].info);
    dbug(1,dprintf("CIP_MASK=0x%lx",GET_DWORD(parms[1].info)));
    if (a->Info_Mask[appl->Id-1] & 0x200){ /* early B3 connect provides */
      a->Info_Mask[appl->Id-1] |=  0x10;   /* call progression infos    */
    }

    /* check if external controller listen and switch listen on or off*/
    if(Id&EXT_CONTROLLER && GET_DWORD(parms[1].info)){
      if(a->profile.Global_Options & ON_BOARD_CODEC) {
        dummy_plci.State = IDLE;
        a->codec_listen[appl->Id-1] = &dummy_plci;
        /* copy OAD and OSA numbers (truncated to 21 digits) */
        a->TelOAD[0] = (byte)(parms[3].length);
        for(i=1;parms[3].length>=i && i<22;i++) {
          a->TelOAD[i] = parms[3].info[i];
        }
        a->TelOAD[i] = 0;
        a->TelOSA[0] = (byte)(parms[4].length);
        for(i=1;parms[4].length>=i && i<22;i++) {
          a->TelOSA[i] = parms[4].info[i];
        }
        a->TelOSA[i] = 0;
      }
      else Info = 0x2002; /* wrong controller, codec not supported */
    }
    else{               /* clear listen */
      a->codec_listen[appl->Id-1] = (PLCI *)0;
    }
  }
  sendf(appl,
        _LISTEN_R|CONFIRM,
        Id,
        Number,
        "w",Info);

  if (a) listen_check(a);
  return false;
}

/*
 * INFO_REQ handler: overlap sending (CPN/KEYPAD), user-to-user info,
 * FACILITY, or a null-CR facility request on a temporary PLCI.
 */
static byte info_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  word i;
  API_PARSE * ai;
  PLCI   * rc_plci = NULL;
  API_PARSE ai_parms[5];
  word Info = 0;

  dbug(1,dprintf("info_req"));
  for(i=0;i<5;i++) ai_parms[i].length = 0;

  ai = &msg[1];

  if(ai->length)
  {
    if(api_parse(&ai->info[1],(word)ai->length,"ssss",ai_parms))
    {
      dbug(1,dprintf("AddInfo wrong"));
      Info = _WRONG_MESSAGE_FORMAT;
    }
  }
  if(!a) Info = _WRONG_STATE;

  if(!Info && plci)
  { /* no fac, with CPN, or KEY */
    rc_plci = plci;
    if(!ai_parms[3].length && plci->State && (msg[0].length || ai_parms[1].length) )
    {
      /* overlap sending option */
      dbug(1,dprintf("OvlSnd"));
      add_s(plci,CPN,&msg[0]);
      add_s(plci,KEY,&ai_parms[1]);
      sig_req(plci,INFO_REQ,0);
      send_req(plci);
      return false;
    }

    if(plci->State && ai_parms[2].length)
    {
      /* User_Info option */
      dbug(1,dprintf("UUI"));
      add_s(plci,UUI,&ai_parms[2]);
      sig_req(plci,USER_DATA,0);
    }
    else if(plci->State && ai_parms[3].length)
    {
      /* Facility option */
      dbug(1,dprintf("FAC"));
      add_s(plci,CPN,&msg[0]);
      add_ai(plci, &msg[1]);
      sig_req(plci,FACILITY_REQ,0);
    }
    else
    {
      Info = _WRONG_STATE;
    }
  }
  else if((ai_parms[1].length || ai_parms[2].length || ai_parms[3].length) && !Info)
  {
    /* NCR_Facility option -> send UUI and Keypad too */
    dbug(1,dprintf("NCR_FAC"));
    if((i=get_plci(a)))
    {
      /* temporary PLCI for the null call reference facility */
      rc_plci = &a->plci[i-1];
      appl->NullCREnable  = true;
      rc_plci->internal_command = C_NCR_FAC_REQ;
      rc_plci->appl = appl;
      add_p(rc_plci,CAI,"\x01\x80");
      add_p(rc_plci,UID,"\x06\x43\x61\x70\x69\x32\x30");
      sig_req(rc_plci,ASSIGN,DSIG_ID);
      send_req(rc_plci);
    }
    else
    {
      Info = _OUT_OF_PLCI;
    }

    if(!Info)
    {
      add_s(rc_plci,CPN,&msg[0]);
      add_ai(rc_plci, &msg[1]);
      sig_req(rc_plci,NCR_FACILITY,0);
      send_req(rc_plci);
      return false;
      /* for application controlled supplementary services */
    }
  }

  if (!rc_plci)
  {
    Info = _WRONG_MESSAGE_FORMAT;
  }

  if(!Info)
  {
    send_req(rc_plci);
  }
  else
  {  /* appl is not assigned to a PLCI or error condition */
    dbug(1,dprintf("localInfoCon"));
    sendf(appl,
          _INFO_R|CONFIRM,
          Id,
          Number,
          "w",Info);
  }
  return false;
}

/* INFO_RES handler: nothing to do, just logs. */
static byte info_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  dbug(1,dprintf("info_res"));
  return false;
}

/* ALERT_REQ handler: sends CALL_ALERT once for a pending incoming
 * call; repeated alerts are answered with _ALERT_IGNORED. */
static byte alert_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  word Info;
  byte ret;

  dbug(1,dprintf("alert_req"));

  Info = _WRONG_IDENTIFIER;
  ret = false;
  if(plci) {
    Info = _ALERT_IGNORED;
    if(plci->State!=INC_CON_ALERT) {
      Info = _WRONG_STATE;
      if(plci->State==INC_CON_PENDING) {
        Info = 0;
        plci->State=INC_CON_ALERT;
        add_ai(plci, &msg[0]);
        sig_req(plci,CALL_ALERT,0);
        ret = 1;
      }
    }
  }
  sendf(appl,
        _ALERT_R|CONFIRM,
        Id,
        Number,
        "w",Info);
  return ret;
}

/*
 * FACILITY_REQ handler: supplementary services dispatcher (handset,
 * HOLD/RETRIEVE, SUSPEND/RESUME, conference, ECT/3PTY, call
 * deflection/forwarding, ...).  NOTE: the function continues on the
 * following lines.
 */
static byte facility_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
{
  word Info = 0;
  word i    = 0;

  word selector;
  word SSreq;
  long relatedPLCIvalue;
  DIVA_CAPI_ADAPTER   * relatedadapter;
  byte * SSparms  = "";
  byte RCparms[]  = "\x05\x00\x00\x02\x00\x00";
  byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
  API_PARSE * parms;
  API_PARSE ss_parms[11];
  PLCI   *rplci;
  byte cai[15];
  dword d;
  API_PARSE dummy;

  dbug(1,dprintf("facility_req"));
  for(i=0;i<9;i++) ss_parms[i].length = 0;

  parms = &msg[1];

  if(!a)
  {
    dbug(1,dprintf("wrong Ctrl"));
    Info = _WRONG_IDENTIFIER;
  }

  selector = GET_WORD(msg[0].info);

  if(!Info)
  {
    switch(selector)
    {
      case SELECTOR_HANDSET:
        Info = AdvCodecSupport(a, plci, appl, HOOK_SUPPORT);
        break;

      case SELECTOR_SU_SERV:
        if(!msg[1].length)
        {
          Info = _WRONG_MESSAGE_FORMAT;
          break;
        }
        SSreq = GET_WORD(&(msg[1].info[1]));
        PUT_WORD(&RCparms[1],SSreq);
        SSparms = RCparms;
        switch(SSreq)
        {
          case S_GET_SUPPORTED_SERVICES:
            if((i=get_plci(a)))
            {
              rplci = &a->plci[i-1];
              rplci->appl = appl;
              add_p(rplci,CAI,"\x01\x80");
              add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30");
              sig_req(rplci,ASSIGN,DSIG_ID);
              send_req(rplci);
            }
            else
            {
              /* no free PLCI: answer locally with the static mask */
              PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
              SSparms = (byte *)SSstruct;
              break;
            }
            rplci->internal_command = GETSERV_REQ_PEND;
            rplci->number = Number;
            rplci->appl = appl;
sig_req(rplci,S_SUPPORTED,0); send_req(rplci); return false; break; case S_LISTEN: if(parms->length==7) { if(api_parse(&parms->info[1],(word)parms->length,"wbd",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } } else { Info = _WRONG_MESSAGE_FORMAT; break; } a->Notification_Mask[appl->Id-1] = GET_DWORD(ss_parms[2].info); if(a->Notification_Mask[appl->Id-1] & SMASK_MWI) /* MWI active? */ { if((i=get_plci(a))) { rplci = &a->plci[i-1]; rplci->appl = appl; add_p(rplci,CAI,"\x01\x80"); add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); } else { break; } rplci->internal_command = GET_MWI_STATE; rplci->number = Number; sig_req(rplci,MWI_POLL,0); send_req(rplci); } break; case S_HOLD: api_parse(&parms->info[1],(word)parms->length,"ws",ss_parms); if(plci && plci->State && plci->SuppState==IDLE) { plci->SuppState = HOLD_REQUEST; plci->command = C_HOLD_REQ; add_s(plci,CAI,&ss_parms[1]); sig_req(plci,CALL_HOLD,0); send_req(plci); return false; } else Info = 0x3010; /* wrong state */ break; case S_RETRIEVE: if(plci && plci->State && plci->SuppState==CALL_HELD) { if(Id & EXT_CONTROLLER) { if(AdvCodecSupport(a, plci, appl, 0)) { Info = 0x3010; /* wrong state */ break; } } else plci->tel = 0; plci->SuppState = RETRIEVE_REQUEST; plci->command = C_RETRIEVE_REQ; if(plci->spoofed_msg==SPOOFING_REQUIRED) { plci->spoofed_msg = CALL_RETRIEVE; plci->internal_command = BLOCK_PLCI; plci->command = 0; dbug(1,dprintf("Spoof")); return false; } else { sig_req(plci,CALL_RETRIEVE,0); send_req(plci); return false; } } else Info = 0x3010; /* wrong state */ break; case S_SUSPEND: if(parms->length) { if(api_parse(&parms->info[1],(word)parms->length,"wbs",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } } if(plci && plci->State) { add_s(plci,CAI,&ss_parms[2]); plci->command = SUSPEND_REQ; sig_req(plci,SUSPEND,0); plci->State = SUSPENDING; send_req(plci); } else Info = 0x3010; /* wrong 
state */ break; case S_RESUME: if(!(i=get_plci(a)) ) { Info = _OUT_OF_PLCI; break; } rplci = &a->plci[i-1]; rplci->appl = appl; rplci->number = Number; rplci->tel = 0; rplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE; /* check 'external controller' bit for codec support */ if(Id & EXT_CONTROLLER) { if(AdvCodecSupport(a, rplci, appl, 0) ) { rplci->Id = 0; Info = 0x300A; break; } } if(parms->length) { if(api_parse(&parms->info[1],(word)parms->length,"wbs",ss_parms)) { dbug(1,dprintf("format wrong")); rplci->Id = 0; Info = _WRONG_MESSAGE_FORMAT; break; } } dummy.length = 0; dummy.info = "\x00"; add_b1(rplci, &dummy, 0, 0); if (a->Info_Mask[appl->Id-1] & 0x200) { /* early B3 connect (CIP mask bit 9) no release after a disc */ add_p(rplci,LLI,"\x01\x01"); } add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); add_s(rplci,CAI,&ss_parms[2]); rplci->command = RESUME_REQ; sig_req(rplci,RESUME,0); rplci->State = RESUMING; send_req(rplci); break; case S_CONF_BEGIN: /* Request */ case S_CONF_DROP: case S_CONF_ISOLATE: case S_CONF_REATTACH: if(api_parse(&parms->info[1],(word)parms->length,"wbd",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } if(plci && plci->State && ((plci->SuppState==IDLE)||(plci->SuppState==CALL_HELD))) { d = GET_DWORD(ss_parms[2].info); if(d>=0x80) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } plci->ptyState = (byte)SSreq; plci->command = 0; cai[0] = 2; switch(SSreq) { case S_CONF_BEGIN: cai[1] = CONF_BEGIN; plci->internal_command = CONF_BEGIN_REQ_PEND; break; case S_CONF_DROP: cai[1] = CONF_DROP; plci->internal_command = CONF_DROP_REQ_PEND; break; case S_CONF_ISOLATE: cai[1] = CONF_ISOLATE; plci->internal_command = CONF_ISOLATE_REQ_PEND; break; case S_CONF_REATTACH: cai[1] = CONF_REATTACH; plci->internal_command = CONF_REATTACH_REQ_PEND; break; } cai[2] = (byte)d; /* Conference Size resp. 
PartyId */ add_p(plci,CAI,cai); sig_req(plci,S_SERVICE,0); send_req(plci); return false; } else Info = 0x3010; /* wrong state */ break; case S_ECT: case S_3PTY_BEGIN: case S_3PTY_END: case S_CONF_ADD: if(parms->length==7) { if(api_parse(&parms->info[1],(word)parms->length,"wbd",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } } else if(parms->length==8) /* workaround for the T-View-S */ { if(api_parse(&parms->info[1],(word)parms->length,"wbdb",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } } else { Info = _WRONG_MESSAGE_FORMAT; break; } if(!msg[1].length) { Info = _WRONG_MESSAGE_FORMAT; break; } if (!plci) { Info = _WRONG_IDENTIFIER; break; } relatedPLCIvalue = GET_DWORD(ss_parms[2].info); relatedPLCIvalue &= 0x0000FFFF; dbug(1,dprintf("PTY/ECT/addCONF,relPLCI=%lx",relatedPLCIvalue)); /* controller starts with 0 up to (max_adapter - 1) */ if (((relatedPLCIvalue & 0x7f) == 0) || (MapController ((byte)(relatedPLCIvalue & 0x7f)) == 0) || (MapController ((byte)(relatedPLCIvalue & 0x7f)) > max_adapter)) { if(SSreq==S_3PTY_END) { dbug(1, dprintf("wrong Controller use 2nd PLCI=PLCI")); rplci = plci; } else { Info = 0x3010; /* wrong state */ break; } } else { relatedadapter = &adapter[MapController ((byte)(relatedPLCIvalue & 0x7f))-1]; relatedPLCIvalue >>=8; /* find PLCI PTR*/ for(i=0,rplci=NULL;i<relatedadapter->max_plci;i++) { if(relatedadapter->plci[i].Id == (byte)relatedPLCIvalue) { rplci = &relatedadapter->plci[i]; } } if(!rplci || !relatedPLCIvalue) { if(SSreq==S_3PTY_END) { dbug(1, dprintf("use 2nd PLCI=PLCI")); rplci = plci; } else { Info = 0x3010; /* wrong state */ break; } } } /* dbug(1,dprintf("rplci:%x",rplci)); dbug(1,dprintf("plci:%x",plci)); dbug(1,dprintf("rplci->ptyState:%x",rplci->ptyState)); dbug(1,dprintf("plci->ptyState:%x",plci->ptyState)); dbug(1,dprintf("SSreq:%x",SSreq)); dbug(1,dprintf("rplci->internal_command:%x",rplci->internal_command)); 
dbug(1,dprintf("rplci->appl:%x",rplci->appl)); dbug(1,dprintf("rplci->Id:%x",rplci->Id)); */ /* send PTY/ECT req, cannot check all states because of US stuff */ if( !rplci->internal_command && rplci->appl ) { plci->command = 0; rplci->relatedPTYPLCI = plci; plci->relatedPTYPLCI = rplci; rplci->ptyState = (byte)SSreq; if(SSreq==S_ECT) { rplci->internal_command = ECT_REQ_PEND; cai[1] = ECT_EXECUTE; rplci->vswitchstate=0; rplci->vsprot=0; rplci->vsprotdialect=0; plci->vswitchstate=0; plci->vsprot=0; plci->vsprotdialect=0; } else if(SSreq==S_CONF_ADD) { rplci->internal_command = CONF_ADD_REQ_PEND; cai[1] = CONF_ADD; } else { rplci->internal_command = PTY_REQ_PEND; cai[1] = (byte)(SSreq-3); } rplci->number = Number; if(plci!=rplci) /* explicit invocation */ { cai[0] = 2; cai[2] = plci->Sig.Id; dbug(1,dprintf("explicit invocation")); } else { dbug(1,dprintf("implicit invocation")); cai[0] = 1; } add_p(rplci,CAI,cai); sig_req(rplci,S_SERVICE,0); send_req(rplci); return false; } else { dbug(0,dprintf("Wrong line")); Info = 0x3010; /* wrong state */ break; } break; case S_CALL_DEFLECTION: if(api_parse(&parms->info[1],(word)parms->length,"wbwss",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } if (!plci) { Info = _WRONG_IDENTIFIER; break; } /* reuse unused screening indicator */ ss_parms[3].info[3] = (byte)GET_WORD(&(ss_parms[2].info[0])); plci->command = 0; plci->internal_command = CD_REQ_PEND; appl->CDEnable = true; cai[0] = 1; cai[1] = CALL_DEFLECTION; add_p(plci,CAI,cai); add_p(plci,CPN,ss_parms[3].info); sig_req(plci,S_SERVICE,0); send_req(plci); return false; break; case S_CALL_FORWARDING_START: if(api_parse(&parms->info[1],(word)parms->length,"wbdwwsss",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } if((i=get_plci(a))) { rplci = &a->plci[i-1]; rplci->appl = appl; add_p(rplci,CAI,"\x01\x80"); add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); } else { 
Info = _OUT_OF_PLCI; break; } /* reuse unused screening indicator */ rplci->internal_command = CF_START_PEND; rplci->appl = appl; rplci->number = Number; appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0])); cai[0] = 2; cai[1] = 0x70|(byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */ cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */ add_p(rplci,CAI,cai); add_p(rplci,OAD,ss_parms[5].info); add_p(rplci,CPN,ss_parms[6].info); sig_req(rplci,S_SERVICE,0); send_req(rplci); return false; break; case S_INTERROGATE_DIVERSION: case S_INTERROGATE_NUMBERS: case S_CALL_FORWARDING_STOP: case S_CCBS_REQUEST: case S_CCBS_DEACTIVATE: case S_CCBS_INTERROGATE: switch(SSreq) { case S_INTERROGATE_NUMBERS: if(api_parse(&parms->info[1],(word)parms->length,"wbd",ss_parms)) { dbug(0,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; } break; case S_CCBS_REQUEST: case S_CCBS_DEACTIVATE: if(api_parse(&parms->info[1],(word)parms->length,"wbdw",ss_parms)) { dbug(0,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; } break; case S_CCBS_INTERROGATE: if(api_parse(&parms->info[1],(word)parms->length,"wbdws",ss_parms)) { dbug(0,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; } break; default: if(api_parse(&parms->info[1],(word)parms->length,"wbdwws",ss_parms)) { dbug(0,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } break; } if(Info) break; if((i=get_plci(a))) { rplci = &a->plci[i-1]; switch(SSreq) { case S_INTERROGATE_DIVERSION: /* use cai with S_SERVICE below */ cai[1] = 0x60|(byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */ rplci->internal_command = INTERR_DIVERSION_REQ_PEND; /* move to rplci if assigned */ break; case S_INTERROGATE_NUMBERS: /* use cai with S_SERVICE below */ cai[1] = DIVERSION_INTERROGATE_NUM; /* Function */ rplci->internal_command = INTERR_NUMBERS_REQ_PEND; /* move to rplci if assigned */ break; case S_CALL_FORWARDING_STOP: rplci->internal_command = CF_STOP_PEND; cai[1] = 0x80|(byte)GET_WORD(&(ss_parms[3].info[0])); 
/* Function */ break; case S_CCBS_REQUEST: cai[1] = CCBS_REQUEST; rplci->internal_command = CCBS_REQUEST_REQ_PEND; break; case S_CCBS_DEACTIVATE: cai[1] = CCBS_DEACTIVATE; rplci->internal_command = CCBS_DEACTIVATE_REQ_PEND; break; case S_CCBS_INTERROGATE: cai[1] = CCBS_INTERROGATE; rplci->internal_command = CCBS_INTERROGATE_REQ_PEND; break; default: cai[1] = 0; break; } rplci->appl = appl; rplci->number = Number; add_p(rplci,CAI,"\x01\x80"); add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); } else { Info = _OUT_OF_PLCI; break; } appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0])); switch(SSreq) { case S_INTERROGATE_NUMBERS: cai[0] = 1; add_p(rplci,CAI,cai); break; case S_CCBS_REQUEST: case S_CCBS_DEACTIVATE: cai[0] = 3; PUT_WORD(&cai[2],GET_WORD(&(ss_parms[3].info[0]))); add_p(rplci,CAI,cai); break; case S_CCBS_INTERROGATE: cai[0] = 3; PUT_WORD(&cai[2],GET_WORD(&(ss_parms[3].info[0]))); add_p(rplci,CAI,cai); add_p(rplci,OAD,ss_parms[4].info); break; default: cai[0] = 2; cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */ add_p(rplci,CAI,cai); add_p(rplci,OAD,ss_parms[5].info); break; } sig_req(rplci,S_SERVICE,0); send_req(rplci); return false; break; case S_MWI_ACTIVATE: if(api_parse(&parms->info[1],(word)parms->length,"wbwdwwwssss",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } if(!plci) { if((i=get_plci(a))) { rplci = &a->plci[i-1]; rplci->appl = appl; rplci->cr_enquiry=true; add_p(rplci,CAI,"\x01\x80"); add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); } else { Info = _OUT_OF_PLCI; break; } } else { rplci = plci; rplci->cr_enquiry=false; } rplci->command = 0; rplci->internal_command = MWI_ACTIVATE_REQ_PEND; rplci->appl = appl; rplci->number = Number; cai[0] = 13; cai[1] = ACTIVATION_MWI; /* Function */ PUT_WORD(&cai[2],GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */ 
PUT_DWORD(&cai[4],GET_DWORD(&(ss_parms[3].info[0]))); /* Number of Messages */ PUT_WORD(&cai[8],GET_WORD(&(ss_parms[4].info[0]))); /* Message Status */ PUT_WORD(&cai[10],GET_WORD(&(ss_parms[5].info[0]))); /* Message Reference */ PUT_WORD(&cai[12],GET_WORD(&(ss_parms[6].info[0]))); /* Invocation Mode */ add_p(rplci,CAI,cai); add_p(rplci,CPN,ss_parms[7].info); /* Receiving User Number */ add_p(rplci,OAD,ss_parms[8].info); /* Controlling User Number */ add_p(rplci,OSA,ss_parms[9].info); /* Controlling User Provided Number */ add_p(rplci,UID,ss_parms[10].info); /* Time */ sig_req(rplci,S_SERVICE,0); send_req(rplci); return false; case S_MWI_DEACTIVATE: if(api_parse(&parms->info[1],(word)parms->length,"wbwwss",ss_parms)) { dbug(1,dprintf("format wrong")); Info = _WRONG_MESSAGE_FORMAT; break; } if(!plci) { if((i=get_plci(a))) { rplci = &a->plci[i-1]; rplci->appl = appl; rplci->cr_enquiry=true; add_p(rplci,CAI,"\x01\x80"); add_p(rplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(rplci,ASSIGN,DSIG_ID); send_req(rplci); } else { Info = _OUT_OF_PLCI; break; } } else { rplci = plci; rplci->cr_enquiry=false; } rplci->command = 0; rplci->internal_command = MWI_DEACTIVATE_REQ_PEND; rplci->appl = appl; rplci->number = Number; cai[0] = 5; cai[1] = DEACTIVATION_MWI; /* Function */ PUT_WORD(&cai[2],GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */ PUT_WORD(&cai[4],GET_WORD(&(ss_parms[3].info[0]))); /* Invocation Mode */ add_p(rplci,CAI,cai); add_p(rplci,CPN,ss_parms[4].info); /* Receiving User Number */ add_p(rplci,OAD,ss_parms[5].info); /* Controlling User Number */ sig_req(rplci,S_SERVICE,0); send_req(rplci); return false; default: Info = 0x300E; /* not supported */ break; } break; /* case SELECTOR_SU_SERV: end */ case SELECTOR_DTMF: return (dtmf_request (Id, Number, a, plci, appl, msg)); case SELECTOR_LINE_INTERCONNECT: return (mixer_request (Id, Number, a, plci, appl, msg)); case PRIV_SELECTOR_ECHO_CANCELLER: appl->appl_flags |= APPL_FLAG_PRIV_EC_SPEC; return (ec_request 
(Id, Number, a, plci, appl, msg)); case SELECTOR_ECHO_CANCELLER: appl->appl_flags &= ~APPL_FLAG_PRIV_EC_SPEC; return (ec_request (Id, Number, a, plci, appl, msg)); case SELECTOR_V42BIS: default: Info = _FACILITY_NOT_SUPPORTED; break; } /* end of switch(selector) */ } dbug(1,dprintf("SendFacRc")); sendf(appl, _FACILITY_R|CONFIRM, Id, Number, "wws",Info,selector,SSparms); return false; } static byte facility_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg) { dbug(1,dprintf("facility_res")); return false; } static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms) { word Info = 0; byte req; byte len; word w; word fax_control_bits, fax_feature_bits, fax_info_change; API_PARSE * ncpi; byte pvc[2]; API_PARSE fax_parms[9]; word i; dbug(1,dprintf("connect_b3_req")); if(plci) { if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE)) { Info = _WRONG_STATE; } else { /* local reply if assign unsuccessfull or B3 protocol allows only one layer 3 connection and already connected or B2 protocol not any LAPD and connect_b3_req contradicts originate/answer direction */ if (!plci->NL.Id || (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE)) && ((plci->channels != 0) || (((plci->B2_prot != B2_SDLC) && (plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)) && ((plci->call_dir & CALL_DIR_ANSWER) && !(plci->call_dir & CALL_DIR_FORCE_OUTG_NL)))))) { dbug(1,dprintf("B3 already connected=%d or no NL.Id=0x%x, dir=%d sstate=0x%x", plci->channels,plci->NL.Id,plci->call_dir,plci->SuppState)); Info = _WRONG_STATE; sendf(appl, _CONNECT_B3_R|CONFIRM, Id, Number, "w",Info); return false; } plci->requested_options_conn = 0; req = N_CONNECT; ncpi = &parms[0]; if(plci->B3_prot==2 || plci->B3_prot==3) { if(ncpi->length>2) { /* check for PVC */ if(ncpi->info[2] || 
ncpi->info[3]) { pvc[0] = ncpi->info[3]; pvc[1] = ncpi->info[2]; add_d(plci,2,pvc); req = N_RESET; } else { if(ncpi->info[1] &1) req = N_CONNECT | N_D_BIT; add_d(plci,(word)(ncpi->length-3),&ncpi->info[4]); } } } else if(plci->B3_prot==5) { if (plci->NL.Id && !plci->nl_remove_id) { fax_control_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low); fax_feature_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low); if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS) || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)) { len = offsetof(T30_INFO, universal_6); fax_info_change = false; if (ncpi->length >= 4) { w = GET_WORD(&ncpi->info[3]); if ((w & 0x0001) != ((word)(((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & 0x0001))) { ((T30_INFO *)(plci->fax_connect_info_buffer))->resolution = (byte)((((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & ~T30_RESOLUTION_R8_0770_OR_200) | ((w & 0x0001) ? T30_RESOLUTION_R8_0770_OR_200 : 0)); fax_info_change = true; } fax_control_bits &= ~(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS); if (w & 0x0002) /* Fax-polling request */ fax_control_bits |= T30_CONTROL_BIT_REQUEST_POLLING; if ((w & 0x0004) /* Request to send / poll another document */ && (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS)) { fax_control_bits |= T30_CONTROL_BIT_MORE_DOCUMENTS; } if (ncpi->length >= 6) { w = GET_WORD(&ncpi->info[5]); if (((byte) w) != ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format) { ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format = (byte) w; fax_info_change = true; } if ((a->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD)) && (GET_WORD(&ncpi->info[5]) & 0x8000)) /* Private SEP/SUB/PWD enable */ { plci->requested_options_conn |= (1L << PRIVATE_FAX_SUB_SEP_PWD); } if ((a->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD)) && (GET_WORD(&ncpi->info[5]) & 0x4000)) /* Private non-standard 
facilities enable */ { plci->requested_options_conn |= (1L << PRIVATE_FAX_NONSTANDARD); } fax_control_bits &= ~(T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_SEL_POLLING | T30_CONTROL_BIT_ACCEPT_PASSWORD); if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id-1]) & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD))) { if (api_parse (&ncpi->info[1], ncpi->length, "wwwwsss", fax_parms)) Info = _WRONG_MESSAGE_FORMAT; else { if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id-1]) & (1L << PRIVATE_FAX_SUB_SEP_PWD)) { fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD; if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING) fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING; } w = fax_parms[4].length; if (w > 20) w = 20; ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = (byte) w; for (i = 0; i < w; i++) ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1+i]; ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0; len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; w = fax_parms[5].length; if (w > 20) w = 20; plci->fax_connect_info_buffer[len++] = (byte) w; for (i = 0; i < w; i++) plci->fax_connect_info_buffer[len++] = fax_parms[5].info[1+i]; w = fax_parms[6].length; if (w > 20) w = 20; plci->fax_connect_info_buffer[len++] = (byte) w; for (i = 0; i < w; i++) plci->fax_connect_info_buffer[len++] = fax_parms[6].info[1+i]; if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id-1]) & (1L << PRIVATE_FAX_NONSTANDARD)) { if (api_parse (&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms)) { dbug(1,dprintf("non-standard facilities info missing or wrong format")); plci->fax_connect_info_buffer[len++] = 0; } else { if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2)) plci->nsf_control_bits = 
GET_WORD(&fax_parms[7].info[2]); plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length); for (i = 0; i < fax_parms[7].length; i++) plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1+i]; } } } } else { len = offsetof(T30_INFO, universal_6); } fax_info_change = true; } if (fax_control_bits != GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low)) { PUT_WORD (&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low, fax_control_bits); fax_info_change = true; } } if (Info == GOOD) { plci->fax_connect_info_length = len; if (fax_info_change) { if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS) { start_internal_command (Id, plci, fax_connect_info_command); return false; } else { start_internal_command (Id, plci, fax_adjust_b23_command); return false; } } } } else Info = _WRONG_STATE; } else Info = _WRONG_STATE; } else if (plci->B3_prot == B3_RTP) { plci->internal_req_buffer[0] = ncpi->length + 1; plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE; for (w = 0; w < ncpi->length; w++) plci->internal_req_buffer[2+w] = ncpi->info[1+w]; start_internal_command (Id, plci, rtp_connect_b3_req_command); return false; } if(!Info) { nl_req_ncci(plci,req,0); return 1; } } } else Info = _WRONG_IDENTIFIER; sendf(appl, _CONNECT_B3_R|CONFIRM, Id, Number, "w",Info); return false; } static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms) { word ncci; API_PARSE * ncpi; byte req; word w; API_PARSE fax_parms[9]; word i; byte len; dbug(1,dprintf("connect_b3_res")); ncci = (word)(Id>>16); if(plci && ncci) { if(a->ncci_state[ncci]==INC_CON_PENDING) { if (GET_WORD (&parms[0].info[0]) != 0) { a->ncci_state[ncci] = OUTG_REJ_PENDING; channel_request_xon (plci, a->ncci_ch[ncci]); channel_xmit_xon (plci); cleanup_ncci_data (plci, ncci); nl_req_ncci(plci,N_DISC,(byte)ncci); return 1; } a->ncci_state[ncci] = INC_ACT_PENDING; req = N_CONNECT_ACK; ncpi = &parms[1]; if ((plci->B3_prot == 4) || 
(plci->B3_prot == 5) || (plci->B3_prot == 7)) { if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1]) & (1L << PRIVATE_FAX_NONSTANDARD)) { if (((plci->B3_prot == 4) || (plci->B3_prot == 5)) && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF) && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)) { len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; if (plci->fax_connect_info_length < len) { ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0; ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0; } if (api_parse (&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms)) { dbug(1,dprintf("non-standard facilities info missing or wrong format")); } else { if (plci->fax_connect_info_length <= len) plci->fax_connect_info_buffer[len] = 0; len += 1 + plci->fax_connect_info_buffer[len]; if (plci->fax_connect_info_length <= len) plci->fax_connect_info_buffer[len] = 0; len += 1 + plci->fax_connect_info_buffer[len]; if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2)) plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]); plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length); for (i = 0; i < fax_parms[7].length; i++) plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1+i]; } plci->fax_connect_info_length = len; ((T30_INFO *)(plci->fax_connect_info_buffer))->code = 0; start_internal_command (Id, plci, fax_connect_ack_command); return false; } } nl_req_ncci(plci,req,(byte)ncci); if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { if (plci->B3_prot == 4) sendf(appl,_CONNECT_B3_ACTIVE_I,Id,0,"s",""); else sendf(appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } } else if (plci->B3_prot == B3_RTP) { plci->internal_req_buffer[0] = ncpi->length + 1; plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE; for (w = 0; w < ncpi->length; 
/* --- tail of connect_b3_res(): finish the RTP-reconfigure copy loop
   (the for() header is on the previous source line), then the default
   (non-fax, non-RTP) N_CONNECT_ACK path --- */
     w++)
    plci->internal_req_buffer[2+w] = ncpi->info[1+w];
  /* hand the queued RTP reconfigure UDATA to the internal command machine */
  start_internal_command (Id, plci, rtp_connect_b3_res_command);
  return false;
}
else
{
  /* plain protocols: take optional NCPI user data and D-bit flag,
     then acknowledge the incoming N_CONNECT */
  if(ncpi->length>2)
  {
    if(ncpi->info[1] &1) req = N_CONNECT_ACK | N_D_BIT; /* D-bit requested in NCPI */
    add_d(plci,(word)(ncpi->length-3),&ncpi->info[4]);
  }
  nl_req_ncci(plci,req,(byte)ncci);
  sendf(appl,_CONNECT_B3_ACTIVE_I,Id,0,"s","");
  if (plci->adjust_b_restore)
  {
    /* a B-channel adjustment was postponed until B3 connect completed */
    plci->adjust_b_restore = false;
    start_internal_command (Id, plci, adjust_b_restore);
  }
}
return 1;
      }
  }
  return false;
}


/*
 * connect_b3_a_res - handle CAPI CONNECT_B3_ACTIVE response.
 *
 * Moves the NCCI (carried in the upper word of Id) from INC_ACT_PENDING
 * to CONNECTED and re-opens receive flow control on the channel.
 * Returns false (no further processing of the saved message).
 */
static byte connect_b3_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                             PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word ncci;

  ncci = (word)(Id>>16); /* NCCI travels in the upper word of Id */
  dbug(1,dprintf("connect_b3_a_res(ncci=0x%x)",ncci));

  if (plci && ncci && (plci->State != IDLE) && (plci->State != INC_DIS_PENDING)
   && (plci->State != OUTG_DIS_PENDING))
  {
    if(a->ncci_state[ncci]==INC_ACT_PENDING) {
      a->ncci_state[ncci] = CONNECTED;
      /* do not downgrade a connected-alert call state */
      if(plci->State!=INC_CON_CONNECTED_ALERT) plci->State = CONNECTED;
      /* re-enable inbound data flow on this channel */
      channel_request_xon (plci, a->ncci_ch[ncci]);
      channel_xmit_xon (plci);
    }
  }
  return false;
}


/*
 * disconnect_b3_req - handle CAPI DISCONNECT_B3 request (head; the
 * function continues on the next source line).
 *
 * Validates the NCCI state, switches it to OUTG_DIS_PENDING and either
 * defers the N_DISC (data still pending on transparent/T.30 protocols)
 * or cleans up the NCCI data and sends N_DISC immediately.
 */
static byte disconnect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                              PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word Info;
  word ncci;
  API_PARSE * ncpi;

  dbug(1,dprintf("disconnect_b3_req"));
  Info = _WRONG_IDENTIFIER;
  ncci = (word)(Id>>16);
  if (plci && ncci)
  {
    Info = _WRONG_STATE;
    if ((a->ncci_state[ncci] == CONNECTED)
     || (a->ncci_state[ncci] == OUTG_CON_PENDING)
     || (a->ncci_state[ncci] == INC_CON_PENDING)
     || (a->ncci_state[ncci] == INC_ACT_PENDING))
    {
      a->ncci_state[ncci] = OUTG_DIS_PENDING;
      channel_request_xon (plci, a->ncci_ch[ncci]);
      channel_xmit_xon (plci);

      if (a->ncci[ncci].data_pending
       && ((plci->B3_prot == B3_TRANSPARENT)
        || (plci->B3_prot == B3_T30)
        || (plci->B3_prot == B3_T30_WITH_EXTENSIONS)))
      {
        /* flush queued data first; remember which NCCI to disconnect */
        plci->send_disc = (byte)ncci;
        plci->command = 0;
        return false;
      }
      else
      {
        cleanup_ncci_data (plci, ncci);

        if(plci->B3_prot==2 || plci->B3_prot==3)
        {
          /* ISO8208/X.25: pass optional NCPI user data with the disconnect */
          ncpi = &parms[0];
          if(ncpi->length>3)
          {
            add_d(plci, (word)(ncpi->length - 3)
/* --- tail of disconnect_b3_req(): the add_d() call continues from the
   previous source line, then N_DISC is issued --- */
,(byte *)&(ncpi->info[4]));
          }
        }
        nl_req_ncci(plci,N_DISC,(byte)ncci);
      }
      return 1;
    }
  }
  sendf(appl, _DISCONNECT_B3_R|CONFIRM, Id, Number, "w",Info);
  return false;
}


/*
 * disconnect_b3_res - handle CAPI DISCONNECT_B3 response.
 *
 * Resets per-connection fax/NCPI state, removes the NCCI from the
 * incoming-disconnect table and frees its receive buffers.  If this was
 * the last channel of an idle/suspending PLCI the PLCI itself is removed.
 * For fax protocols with paper-format support a deferred fax disconnect
 * command is started instead.
 */
static byte disconnect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                              PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word ncci;
  word i;

  ncci = (word)(Id>>16);
  dbug(1,dprintf("disconnect_b3_res(ncci=0x%x",ncci));
  if(plci && ncci)
  {
    plci->requested_options_conn = 0;
    plci->fax_connect_info_length = 0;
    plci->ncpi_state = 0x00;
    /* for non-X.25-style B3 over non-LAPD B2, force outgoing NL direction
       on the next connect */
    if (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
     && ((plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)))
    {
      plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
    }
    /* look up this NCCI in the incoming-disconnect table */
    for(i=0; i<MAX_CHANNELS_PER_PLCI && plci->inc_dis_ncci_table[i]!=(byte)ncci; i++);
    if(i<MAX_CHANNELS_PER_PLCI)
    {
      if(plci->channels)plci->channels--;
      /* close the gap in the table */
      for(; i<MAX_CHANNELS_PER_PLCI-1; i++)
        plci->inc_dis_ncci_table[i] = plci->inc_dis_ncci_table[i+1];
      plci->inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI-1] = 0;

      ncci_free_receive_buffers (plci, ncci);

      if((plci->State==IDLE || plci->State==SUSPENDING) && !plci->channels){
        if(plci->State == SUSPENDING){
          /* report call-suspended facility + disconnect to the appl */
          sendf(plci->appl,
                _FACILITY_I,
                Id & 0xffffL,
                0,
                "ws", (word)3, "\x03\x04\x00\x00");
          sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
        }
        plci_remove(plci);
        plci->State=IDLE;
      }
    }
    else
    {
      if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
       && ((plci->B3_prot == 4) || (plci->B3_prot == 5))
       && (a->ncci_state[ncci] == INC_DIS_PENDING))
      {
        ncci_free_receive_buffers (plci, ncci);

        nl_req_ncci(plci,N_EDATA,(byte)ncci);

        plci->adapter->ncci_state[ncci] = IDLE;
        start_internal_command (Id, plci, fax_disconnect_command);
        return 1;
      }
    }
  }
  return false;
}


/*
 * data_b3_req - handle CAPI DATA_B3 request (head; the function
 * continues on the next source line).
 *
 * Queues the outgoing data descriptor for the NCCI and triggers
 * transmission.
 */
static byte data_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                        PLCI *plci, APPL *appl, API_PARSE *parms)
{
  NCCI *ncci_ptr;
  DATA_B3_DESC *data;
  word Info;
  word ncci;
  word i;

  dbug(1,dprintf("data_b3_req"));
  Info = _WRONG_IDENTIFIER;
  ncci = (word)(Id>>16);
/* --- body of data_b3_req(), continued from the previous source line --- */
  dbug(1,dprintf("ncci=0x%x, plci=0x%x",ncci,plci));
  if (plci && ncci)
  {
    Info = _WRONG_STATE;
    if ((a->ncci_state[ncci] == CONNECTED)
     || (a->ncci_state[ncci] == INC_ACT_PENDING))
    {
        /* queue data */
      ncci_ptr = &(a->ncci[ncci]);
      /* next free slot in the circular DBuffer ring */
      i = ncci_ptr->data_out + ncci_ptr->data_pending;
      if (i >= MAX_DATA_B3)
        i -= MAX_DATA_B3;
      data = &(ncci_ptr->DBuffer[i]);
      data->Number = Number;
      /* if the data pointer parameter lives inside our own message-in
         queue it already holds the (queued) buffer address; otherwise
         register the application's transmit buffer */
      if ((((byte   *)(parms[0].info)) >= ((byte   *)(plci->msg_in_queue)))
       && (((byte   *)(parms[0].info)) < ((byte   *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
      {
        data->P = (byte *)(long)(*((dword *)(parms[0].info)));
      }
      else
        data->P = TransmitBufferSet(appl,*(dword *)parms[0].info);
      data->Length = GET_WORD(parms[1].info);
      data->Handle = GET_WORD(parms[2].info);
      data->Flags = GET_WORD(parms[3].info);
      (ncci_ptr->data_pending)++;

        /* check for delivery confirmation */
      if (data->Flags & 0x0004)
      {
        /* remember Number/Handle for the later DATA_B3_CONF */
        i = ncci_ptr->data_ack_out + ncci_ptr->data_ack_pending;
        if (i >= MAX_DATA_ACK)
          i -= MAX_DATA_ACK;
        ncci_ptr->DataAck[i].Number = data->Number;
        ncci_ptr->DataAck[i].Handle = data->Handle;
        (ncci_ptr->data_ack_pending)++;
      }

      send_data(plci);
      return false;
    }
  }
  if (appl)
  {
    if (plci)
    {
      /* request failed: release a buffer that was queued in msg_in_queue */
      if ((((byte   *)(parms[0].info)) >= ((byte   *)(plci->msg_in_queue)))
       && (((byte   *)(parms[0].info)) < ((byte   *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
      {
        TransmitBufferFree (appl, (byte *)(long)(*((dword *)(parms[0].info))));
      }
    }
    sendf(appl,
          _DATA_B3_R|CONFIRM,
          Id,
          Number,
          "ww",GET_WORD(parms[2].info),Info);
  }
  return false;
}


/*
 * data_b3_res - handle CAPI DATA_B3 response (head; the function
 * continues on the next source line).
 *
 * Releases the application's receive buffer slot identified by the
 * data handle and, where possible, re-opens receive flow control.
 */
static byte data_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                        PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word n;
  word ncci;
  word NCCIcode;

  dbug(1,dprintf("data_b3_res"));

  ncci = (word)(Id>>16);
  if(plci && ncci) {
    n = GET_WORD(parms[0].info); /* data handle = buffer index */
    dbug(1,dprintf("free(%d)",n));
    NCCIcode = ncci | (((word) a->Id) << 8);
    /* the slot must match this NCCI and this PLCI before it is freed */
    if(n<appl->MaxBuffer &&
       appl->DataNCCI[n]==NCCIcode &&
       (byte)(appl->DataFlags[n]>>8)==plci->Id) {
      dbug(1,dprintf("found"));
      appl->DataNCCI[n] = 0;

      if (channel_can_xon (plci, a->ncci_ch[ncci])) {
/* --- tail of data_b3_res(), continued from the previous source line --- */
        channel_request_xon (plci, a->ncci_ch[ncci]);
      }
      channel_xmit_xon (plci);

      if(appl->DataFlags[n] &4) {
        /* delivery confirmation was requested: acknowledge to the network */
        nl_req_ncci(plci,N_DATA_ACK,(byte)ncci);
        return 1;
      }
    }
  }
  return false;
}


/*
 * reset_b3_req - handle CAPI RESET_B3 request.
 *
 * For X.25-style protocols (ISO 8208 / X.25 DCE) a N_RESET is sent on
 * the connected NCCI; for transparent B3 an internal reset command is
 * started.  A RESET_B3 confirm is always sent back to the application.
 */
static byte reset_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                         PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word Info;
  word ncci;

  dbug(1,dprintf("reset_b3_req"));

  Info = _WRONG_IDENTIFIER;
  ncci = (word)(Id>>16);
  if(plci && ncci)
  {
    Info = _WRONG_STATE;
    switch (plci->B3_prot)
    {
    case B3_ISO8208:
    case B3_X25_DCE:
      if(a->ncci_state[ncci]==CONNECTED)
      {
        nl_req_ncci(plci,N_RESET,(byte)ncci);
        send_req(plci);
        Info = GOOD;
      }
      break;
    case B3_TRANSPARENT:
      if(a->ncci_state[ncci]==CONNECTED)
      {
        start_internal_command (Id, plci, reset_b3_command);
        Info = GOOD;
      }
      break;
    }
  }
  /* reset_b3 must result in a reset_b3_con & reset_b3_Ind */
  sendf(appl,
        _RESET_B3_R|CONFIRM,
        Id,
        Number,
        "w",Info);
  return false;
}


/*
 * reset_b3_res - handle CAPI RESET_B3 response.
 *
 * For X.25-style protocols an incoming reset (INC_RES_PENDING) is
 * acknowledged with N_RESET_ACK and the NCCI returns to CONNECTED.
 * Returns true when the saved message must be processed further.
 */
static byte reset_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                         PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word ncci;

  dbug(1,dprintf("reset_b3_res"));

  ncci = (word)(Id>>16);
  if(plci && ncci)
  {
    switch (plci->B3_prot)
    {
    case B3_ISO8208:
    case B3_X25_DCE:
      if(a->ncci_state[ncci]==INC_RES_PENDING)
      {
        a->ncci_state[ncci] = CONNECTED;
        nl_req_ncci(plci,N_RESET_ACK,(byte)ncci);
        return true;
      }
      break;
    }
  }
  return false;
}


/*
 * connect_b3_t90_a_res - handle CAPI CONNECT_B3_T90_ACTIVE response
 * (head; the function continues on the next source line).
 *
 * Marks the NCCI as CONNECTED; for a still-pending incoming connect the
 * N_CONNECT is acknowledged, optionally with NCPI data and D-bit.
 */
static byte connect_b3_t90_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                                 PLCI *plci, APPL *appl, API_PARSE *parms)
{
  word ncci;
  API_PARSE * ncpi;
  byte req;

  dbug(1,dprintf("connect_b3_t90_a_res"));

  ncci = (word)(Id>>16);
  if(plci && ncci) {
    if(a->ncci_state[ncci]==INC_ACT_PENDING) {
      a->ncci_state[ncci] = CONNECTED;
    }
    else if(a->ncci_state[ncci]==INC_CON_PENDING) {
      a->ncci_state[ncci] = CONNECTED;
      req = N_CONNECT_ACK;

      /* parms[0]==0 for CAPI original message definition!
*/ if(parms[0].info) { ncpi = &parms[1]; if(ncpi->length>2) { if(ncpi->info[1] &1) req = N_CONNECT_ACK | N_D_BIT; add_d(plci,(word)(ncpi->length-3),&ncpi->info[4]); } } nl_req_ncci(plci,req,(byte)ncci); return 1; } } return false; } static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg) { word Info=0; word i; byte tel; API_PARSE bp_parms[7]; if(!plci || !msg) { Info = _WRONG_IDENTIFIER; } else { dbug(1,dprintf("select_b_req[%d],PLCI=0x%x,Tel=0x%x,NL=0x%x,appl=0x%x,sstate=0x%x", msg->length,plci->Id,plci->tel,plci->NL.Id,plci->appl,plci->SuppState)); dbug(1,dprintf("PlciState=0x%x",plci->State)); for(i=0;i<7;i++) bp_parms[i].length = 0; /* check if no channel is open, no B3 connected only */ if((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE) || plci->channels || plci->nl_remove_id) { Info = _WRONG_STATE; } /* check message format and fill bp_parms pointer */ else if(msg->length && api_parse(&msg->info[1], (word)msg->length, "wwwsss", bp_parms)) { Info = _WRONG_MESSAGE_FORMAT; } else { if((plci->State==INC_CON_PENDING) || (plci->State==INC_CON_ALERT)) /* send alert tone inband to the network, */ { /* e.g. 
Qsig or RBS or Cornet-N or xess PRI */ if(Id & EXT_CONTROLLER) { sendf(appl, _SELECT_B_REQ|CONFIRM, Id, Number, "w", 0x2002); /* wrong controller */ return 0; } plci->State=INC_CON_CONNECTED_ALERT; plci->appl = appl; clear_c_ind_mask_bit (plci, (word)(appl->Id-1)); dump_c_ind_mask (plci); for(i=0; i<max_appl; i++) /* disconnect the other appls */ { /* its quasi a connect */ if(test_c_ind_mask_bit (plci, i)) sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } } api_save_msg(msg, "s", &plci->saved_msg); tel = plci->tel; if(Id & EXT_CONTROLLER) { if(tel) /* external controller in use by this PLCI */ { if(a->AdvSignalAppl && a->AdvSignalAppl!=appl) { dbug(1,dprintf("Ext_Ctrl in use 1")); Info = _WRONG_STATE; } } else /* external controller NOT in use by this PLCI ? */ { if(a->AdvSignalPLCI) { dbug(1,dprintf("Ext_Ctrl in use 2")); Info = _WRONG_STATE; } else /* activate the codec */ { dbug(1,dprintf("Ext_Ctrl start")); if(AdvCodecSupport(a, plci, appl, 0) ) { dbug(1,dprintf("Error in codec procedures")); Info = _WRONG_STATE; } else if(plci->spoofed_msg==SPOOFING_REQUIRED) /* wait until codec is active */ { plci->spoofed_msg = AWAITING_SELECT_B; plci->internal_command = BLOCK_PLCI; /* lock other commands */ plci->command = 0; dbug(1,dprintf("continue if codec loaded")); return false; } } } } else /* external controller bit is OFF */ { if(tel) /* external controller in use, need to switch off */ { if(a->AdvSignalAppl==appl) { CodecIdCheck(a, plci); plci->tel = 0; plci->adv_nl = 0; dbug(1,dprintf("Ext_Ctrl disable")); } else { dbug(1,dprintf("Ext_Ctrl not requested")); } } } if (!Info) { if (plci->call_dir & CALL_DIR_OUT) plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE; else if (plci->call_dir & CALL_DIR_IN) plci->call_dir = CALL_DIR_IN | CALL_DIR_ANSWER; start_internal_command (Id, plci, select_b_command); return false; } } } sendf(appl, _SELECT_B_REQ|CONFIRM, Id, Number, "w", Info); return false; } static byte manufacturer_req(dword Id, word 
Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *parms)
{
  /* Handler for CAPI MANUFACTURER_REQ messages (_DI_MANU_ID family).
   * Dispatches on the manufacturer sub-command in parms[1] and confirms
   * with _MANUFACTURER_R|CONFIRM unless an early return path applies. */
  word command;
  word i;
  word ncci;
  API_PARSE * m;
  API_PARSE m_parms[5];
  word codec;
  byte req;
  byte ch;
  byte dir;
  /* Shared static IE templates; byte [1] is patched per request below. */
  static byte chi[2] = {0x01,0x00};
  static byte lli[2] = {0x01,0x00};
  static byte codec_cai[2] = {0x01,0x01};
  static byte null_msg = {0};
  /* Zero-length parameter block, used for the codec-only B1 setup. */
  static API_PARSE null_parms = { 0, &null_msg };
  PLCI * v_plci;
  word Info=0;

  dbug(1,dprintf("manufacturer_req"));
  for(i=0;i<5;i++) m_parms[i].length = 0;

  /* Reject messages that do not carry the expected manufacturer id. */
  if(GET_DWORD(parms[0].info)!=_DI_MANU_ID) {
    Info = _WRONG_MESSAGE_FORMAT;
  }
  command = GET_WORD(parms[1].info);
  m = &parms[2];
  if (!Info)
  {
    switch(command) {
    case _DI_ASSIGN_PLCI:
      /* Allocate a fresh PLCI and assign SIG (and optionally NL) to it. */
      if(api_parse(&m->info[1],(word)m->length,"wbbs",m_parms)) {
        Info = _WRONG_MESSAGE_FORMAT;
        break;
      }
      codec = GET_WORD(m_parms[0].info);
      ch = m_parms[1].info[0];
      dir = m_parms[2].info[0];
      if((i=get_plci(a)))
      {
        plci = &a->plci[i-1];
        plci->appl = appl;
        plci->command = _MANUFACTURER_R;
        plci->m_command = command;
        plci->number = Number;
        plci->State = LOCAL_CONNECT;
        /* Build the CAPI Id with the external-controller bit (0x80) set. */
        Id = ( ((word)plci->Id<<8)|plci->adapter->Id|0x80);
        dbug(1,dprintf("ManCMD,plci=0x%x",Id));

        if((ch==1 || ch==2) && (dir<=2))
        {
          chi[1] = (byte)(0x80|ch);
          lli[1] = 0;
          plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
          switch(codec)
          {
          case 0:
            Info = add_b1(plci,&m_parms[3],0,0);
            break;
          case 1:
            add_p(plci,CAI,codec_cai);
            break;
          /* manual 'switch on' to the codec support without signalling */
          /* first 'assign plci' with this function, then use */
          case 2:
            if(AdvCodecSupport(a, plci, appl, 0) ) {
              Info = _RESOURCE_ERROR;
            }
            else {
              Info = add_b1(plci,&null_parms,0,B1_FACILITY_LOCAL);
              lli[1] = 0x10; /* local call codec stream */
            }
            break;
          }

          plci->State = LOCAL_CONNECT;
          plci->manufacturer = true;
          plci->command = _MANUFACTURER_R;
          plci->m_command = command;
          plci->number = Number;

          if(!Info)
          {
            add_p(plci,LLI,lli);
            add_p(plci,CHI,chi);
            add_p(plci,UID,"\x06\x43\x61\x70\x69\x32\x30");
            sig_req(plci,ASSIGN,DSIG_ID);

            if(!codec)
            {
              /* codec==0: also assign the network layer (B2/B3). */
              Info = add_b23(plci,&m_parms[3]);
              if(!Info)
              {
                nl_req_ncci(plci,ASSIGN,0);
                send_req(plci);
              }
            }
            if(!Info)
            {
              dbug(1,dprintf("dir=0x%x,spoof=0x%x",dir,plci->spoofed_msg));
              if (plci->spoofed_msg==SPOOFING_REQUIRED)
              {
                /* Codec not yet active: park the message and block the
                 * PLCI until the spoofed confirmation arrives. */
                api_save_msg (m_parms, "wbbs", &plci->saved_msg);
                plci->spoofed_msg = AWAITING_MANUF_CON;
                plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
                plci->command = 0;
                send_req(plci);
                return false;
              }
              if(dir==1) {
                sig_req(plci,CALL_REQ,0);
              }
              else if(!dir){
                sig_req(plci,LISTEN_REQ,0);
              }
              send_req(plci);
            }
            else
            {
              sendf(appl,
                    _MANUFACTURER_R|CONFIRM,
                    Id,
                    Number,
                    "dww",_DI_MANU_ID,command,Info);
              return 2;
            }
          }
        }
      }
      else  Info = _OUT_OF_PLCI;
      break;

    case _DI_IDI_CTRL:
      /* Pass a raw SIG request (CALL_REQ/LAW_REQ/HANGUP/...) through
       * to the card for an existing PLCI. */
      if(!plci)
      {
        Info = _WRONG_IDENTIFIER;
        break;
      }
      if(api_parse(&m->info[1],(word)m->length,"bs",m_parms)) {
        Info = _WRONG_MESSAGE_FORMAT;
        break;
      }
      req = m_parms[0].info[0];
      plci->command = _MANUFACTURER_R;
      plci->m_command = command;
      plci->number = Number;
      if(req==CALL_REQ)
      {
        plci->b_channel = getChannel(&m_parms[1]);
        mixer_set_bchannel_id_esc (plci, plci->b_channel);
        if(plci->spoofed_msg==SPOOFING_REQUIRED)
        {
          plci->spoofed_msg = CALL_REQ | AWAITING_MANUF_CON;
          plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
          plci->command = 0;
          break;
        }
      }
      else if(req==LAW_REQ)
      {
        plci->cr_enquiry = true;
      }
      add_ss(plci,FTY,&m_parms[1]);
      sig_req(plci,req,0);
      send_req(plci);
      if(req==HANGUP)
      {
        /* On hangup also tear down any connected NCCIs and remove NL. */
        if (plci->NL.Id && !plci->nl_remove_id)
        {
          if (plci->channels)
          {
            for (ncci = 1; ncci < MAX_NCCI+1; ncci++)
            {
              if ((a->ncci_plci[ncci] == plci->Id) && (a->ncci_state[ncci] == CONNECTED))
              {
                a->ncci_state[ncci] = OUTG_DIS_PENDING;
                cleanup_ncci_data (plci, ncci);
                nl_req_ncci(plci,N_DISC,(byte)ncci);
              }
            }
          }
          mixer_remove (plci);
          nl_req_ncci(plci,REMOVE,0);
          send_req(plci);
        }
      }
      break;

    case _DI_SIG_CTRL:
    /* signalling control for loop activation B-channel */
      if(!plci)
      {
        Info = _WRONG_IDENTIFIER;
        break;
      }
      if(m->length){
        plci->command = _MANUFACTURER_R;
        plci->number = Number;
        add_ss(plci,FTY,m);
        sig_req(plci,SIG_CTRL,0);
        send_req(plci);
      }
      else Info = _WRONG_MESSAGE_FORMAT;
      break;

    case _DI_RXT_CTRL:
    /* activation control for receiver/transmitter B-channel */
      if(!plci)
      {
        Info = _WRONG_IDENTIFIER;
        break;
      }
      if(m->length){
        plci->command = _MANUFACTURER_R;
        plci->number = Number;
        add_ss(plci,FTY,m);
        sig_req(plci,DSP_CTRL,0);
        send_req(plci);
      }
      else Info = _WRONG_MESSAGE_FORMAT;
      break;

    case _DI_ADV_CODEC:
    case _DI_DSP_CTRL:
      /* TEL_CTRL commands to support non standard adjustments: */
      /* Ring on/off, Handset micro volume, external micro vol. */
      /* handset+external speaker volume, receiver+transm. gain,*/
      /* handsfree on (hookinfo off), set mixer command */

      if(command == _DI_ADV_CODEC)
      {
        if(!a->AdvCodecPLCI) {
          Info = _WRONG_STATE;
          break;
        }
        v_plci = a->AdvCodecPLCI;
      }
      else
      {
        /* _DI_DSP_CTRL: two sub-commands are handled locally, everything
         * else is forwarded as a TEL_CTRL request below. */
        if (plci
         && (m->length >= 3)
         && (m->info[1] == 0x1c)
         && (m->info[2] >= 1))
        {
          if (m->info[3] == DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS)
          {
            if ((plci->tel != ADV_VOICE) || (plci != a->AdvSignalPLCI))
            {
              Info = _WRONG_STATE;
              break;
            }
            /* Copy the mixer coefficients, clamped to both the message
             * length and the adapter buffer size. */
            a->adv_voice_coef_length = m->info[2] - 1;
            if (a->adv_voice_coef_length > m->length - 3)
              a->adv_voice_coef_length = (byte)(m->length - 3);
            if (a->adv_voice_coef_length > ADV_VOICE_COEF_BUFFER_SIZE)
              a->adv_voice_coef_length = ADV_VOICE_COEF_BUFFER_SIZE;
            for (i = 0; i < a->adv_voice_coef_length; i++)
              a->adv_voice_coef_buffer[i] = m->info[4 + i];
            if (plci->B1_facilities & B1_FACILITY_VOICE)
              adv_voice_write_coefs (plci, ADV_VOICE_WRITE_UPDATE);
            break;
          }
          else if (m->info[3] == DSP_CTRL_SET_DTMF_PARAMETERS)
          {
            if (!(a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_PARAMETERS))
            {
              Info = _FACILITY_NOT_SUPPORTED;
              break;
            }
            /* Copy DTMF detection parameters, clamped as above. */
            plci->dtmf_parameter_length = m->info[2] - 1;
            if (plci->dtmf_parameter_length > m->length - 3)
              plci->dtmf_parameter_length = (byte)(m->length - 3);
            if (plci->dtmf_parameter_length > DTMF_PARAMETER_BUFFER_SIZE)
              plci->dtmf_parameter_length = DTMF_PARAMETER_BUFFER_SIZE;
            for (i = 0; i < plci->dtmf_parameter_length; i++)
              plci->dtmf_parameter_buffer[i] = m->info[4+i];
            if (plci->B1_facilities & B1_FACILITY_DTMFR)
              dtmf_parameter_write (plci);
            break;
          }
        }
        v_plci = plci;
      }

      if(!v_plci)
      {
        Info = _WRONG_IDENTIFIER;
        break;
      }
      if(m->length){
        add_ss(v_plci,FTY,m);
        sig_req(v_plci,TEL_CTRL,0);
        send_req(v_plci);
      }
      else Info = _WRONG_MESSAGE_FORMAT;
      break;

    case _DI_OPTIONS_REQUEST:
      /* Record per-application private options, rejecting any bit the
       * adapter profile does not offer. */
      if(api_parse(&m->info[1],(word)m->length,"d",m_parms)) {
        Info = _WRONG_MESSAGE_FORMAT;
        break;
      }
      if (GET_DWORD (m_parms[0].info) & ~a->man_profile.private_options)
      {
        Info = _FACILITY_NOT_SUPPORTED;
        break;
      }
      a->requested_options_table[appl->Id-1] = GET_DWORD (m_parms[0].info);
      break;

    default:
      Info = _WRONG_MESSAGE_FORMAT;
      break;
    }
  }

  sendf(appl,
        _MANUFACTURER_R|CONFIRM,
        Id,
        Number,
        "dww",_DI_MANU_ID,command,Info);
  return false;
}


/* Handler for CAPI MANUFACTURER_RESP messages. Only _DI_NEGOTIATE_B3 is
 * acted upon: it updates the fax (T.30) connect-info buffer with the
 * non-standard-facilities IE from the response and kicks the
 * fax_edata_ack internal command. */
static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                             PLCI *plci, APPL *appl, API_PARSE *msg)
{
  word indication;

  API_PARSE m_parms[3];
  API_PARSE *ncpi;
  API_PARSE fax_parms[9];
  word i;
  byte len;

  dbug(1,dprintf("manufacturer_res"));

  if ((msg[0].length == 0)
   || (msg[1].length == 0)
   || (GET_DWORD(msg[0].info)!=_DI_MANU_ID))
  {
    return false;
  }
  indication = GET_WORD(msg[1].info);
  switch (indication)
  {

  case _DI_NEGOTIATE_B3:
    if(!plci)
      break;
    /* Only valid for fax protocols (B3_prot 4/5) with a pending
     * NEGOTIATE_B3. */
    if (((plci->B3_prot != 4) && (plci->B3_prot != 5))
     || !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
    {
      dbug(1,dprintf("wrong state for NEGOTIATE_B3 parameters"));
      break;
    }
    if (api_parse (&msg[2].info[1], msg[2].length, "ws", m_parms))
    {
      dbug(1,dprintf("wrong format in NEGOTIATE_B3 parameters"));
      break;
    }
    ncpi = &m_parms[1];
    len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
    if (plci->fax_connect_info_length < len)
    {
      ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
      ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
    }
    if (api_parse (&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
    {
      dbug(1,dprintf("non-standard facilities info missing or wrong format"));
    }
    else
    {
      /* Skip over the first length-prefixed field after the fixed
       * T30_INFO header (zero-filling if the buffer was shorter). */
      if (plci->fax_connect_info_length <= len)
        plci->fax_connect_info_buffer[len] = 0;
      len += 1 + plci->fax_connect_info_buffer[len];
      /* Skip the second length-prefixed field the same way. */
      if (plci->fax_connect_info_length <= len)
        plci->fax_connect_info_buffer[len] = 0;
      len += 1 + plci->fax_connect_info_buffer[len];
      if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
        plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
      /* Append the non-standard-facilities IE (length byte + payload). */
      plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
      for (i = 0; i < fax_parms[7].length; i++)
        plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1+i];
    }
    plci->fax_connect_info_length = len;
    plci->fax_edata_ack_length = plci->fax_connect_info_length;
    start_internal_command (Id, plci, fax_edata_ack_command);
    break;
  }
  return false;
}


/*------------------------------------------------------------------*/
/* IDI callback function                                            */
/*------------------------------------------------------------------*/

/* Entry point called by the IDI layer for every return code (Rc) and
 * indication (Ind) on an entity. e->user[0] bit 15 distinguishes the
 * network-layer entity (set) from the signalling entity (clear).
 * After dispatching Rc/Ind it drains the PLCI's queued CAPI messages
 * and flushes pending data/requests. */
void callback(ENTITY * e)
{
  DIVA_CAPI_ADAPTER * a;
  APPL * appl;
  PLCI * plci;
  CAPI_MSG *m;
  word i, j;
  byte rc;
  byte ch;
  byte req;
  byte global_req;
  int no_cancel_rc;

  dbug(1,dprintf("%x:CB(%x:Req=%x,Rc=%x,Ind=%x)",
                 (e->user[0]+1)&0x7fff,e->Id,e->Req,e->Rc,e->Ind));

  a = &(adapter[(byte)e->user[0]]);
  plci = &(a->plci[e->user[1]]);
  no_cancel_rc = DIVA_CAPI_SUPPORTS_NO_CANCEL(a);

  /*
     If new protocol code and new XDI is used then CAPI should work
     fully in accordance with the IDI spec and look at the callback
     field instead of the Rc field for return codes.
   */
  if (((e->complete == 0xff) && no_cancel_rc)
   || (e->Rc && !no_cancel_rc))
  {
    rc = e->Rc;
    ch = e->RcCh;
    req = e->Req;
    e->Rc = 0;

    if (e->user[0] & 0x8000)
    {
      /* ---- network-layer entity return code ---- */
      /*
         If REMOVE request was sent then we have to wait until
         return code with Id set to zero arrives.
         All other return codes should be ignored.
       */
      if (req == REMOVE)
      {
        if (e->Id)
        {
          dbug(1,dprintf("cancel RC in REMOVE state"));
          return;
        }
        channel_flow_control_remove (plci);
        for (i = 0; i < 256; i++)
        {
          if (a->FlowControlIdTable[i] == plci->nl_remove_id)
            a->FlowControlIdTable[i] = 0;
        }
        plci->nl_remove_id = 0;
        if (plci->rx_dma_descriptor > 0) {
          diva_free_dma_descriptor (plci, plci->rx_dma_descriptor - 1);
          plci->rx_dma_descriptor = 0;
        }
      }
      if (rc == OK_FC)
      {
        /* Flow-controlled OK: remember the entity per channel. */
        a->FlowControlIdTable[ch] = e->Id;
        a->FlowControlSkipTable[ch] = 0;

        a->ch_flow_control[ch] |= N_OK_FC_PENDING;
        a->ch_flow_plci[ch] = plci->Id;
        plci->nl_req = 0;
      }
      else
      {
        /*
          Cancel return codes self, if feature was requested
          */
        if (no_cancel_rc && (a->FlowControlIdTable[ch] == e->Id) && e->Id) {
          a->FlowControlIdTable[ch] = 0;
          if ((rc == OK) && a->FlowControlSkipTable[ch]) {
            /* NOTE(review): "Id:0x02" looks like a typo for "Id:%02x" —
             * the e->Id argument is never consumed by this format string. */
            dbug(3,dprintf ("XDI CAPI: RC cancelled Id:0x02, Ch:%02x", e->Id, ch));
            return;
          }
        }

        if (a->ch_flow_control[ch] & N_OK_FC_PENDING)
        {
          a->ch_flow_control[ch] &= ~N_OK_FC_PENDING;
          if (ch == e->ReqCh)
            plci->nl_req = 0;
        }
        else
          plci->nl_req = 0;
      }
      if (plci->nl_req)
        control_rc (plci, 0, rc, ch, 0, true);
      else
      {
        if (req == N_XON)
        {
          channel_x_on (plci, ch);
          if (plci->internal_command)
            control_rc (plci, req, rc, ch, 0, true);
        }
        else
        {
          if (plci->nl_global_req)
          {
            global_req = plci->nl_global_req;
            plci->nl_global_req = 0;
            if (rc != ASSIGN_OK) {
              e->Id = 0;
              if (plci->rx_dma_descriptor > 0) {
                diva_free_dma_descriptor (plci, plci->rx_dma_descriptor - 1);
                plci->rx_dma_descriptor = 0;
              }
            }
            channel_xmit_xon (plci);
            control_rc (plci, 0, rc, ch, global_req, true);
          }
          else if (plci->data_sent)
          {
            /* Data transmit completed: confirm it to the application. */
            channel_xmit_xon (plci);
            plci->data_sent = false;
            plci->NL.XNum = 1;
            data_rc (plci, ch);
            if (plci->internal_command)
              control_rc (plci, req, rc, ch, 0, true);
          }
          else
          {
            channel_xmit_xon (plci);
            control_rc (plci, req, rc, ch, 0, true);
          }
        }
      }
    }
    else
    {
      /* ---- signalling entity return code ---- */
      /*
         If REMOVE request was sent then we have to wait until
         return code with Id set to zero arrives.
         All other return codes should be ignored.
       */
      if (req == REMOVE)
      {
        if (e->Id)
        {
          dbug(1,dprintf("cancel RC in REMOVE state"));
          return;
        }
        plci->sig_remove_id = 0;
      }
      plci->sig_req = 0;
      if (plci->sig_global_req)
      {
        global_req = plci->sig_global_req;
        plci->sig_global_req = 0;
        if (rc != ASSIGN_OK)
          e->Id = 0;
        channel_xmit_xon (plci);
        control_rc (plci, 0, rc, ch, global_req, false);
      }
      else
      {
        channel_xmit_xon (plci);
        control_rc (plci, req, rc, ch, 0, false);
      }
    }
    /*
      Again: in accordance with IDI spec Rc and Ind can't be delivered in the
      same callback. Also if new XDI and protocol code used then jump
      direct to finish.
      */
    if (no_cancel_rc) {
      channel_xmit_xon(plci);
      goto capi_callback_suffix;
    }
  }

  channel_xmit_xon(plci);

  if (e->Ind)
  {
    if (e->user[0] &0x8000) {
      /* Network-layer indication: clear fake flow-control state around
       * disconnects before/after delivering the indication. */
      byte Ind = e->Ind & 0x0f;
      byte Ch = e->IndCh;
      if (((Ind==N_DISC) || (Ind==N_DISC_ACK)) &&
          (a->ch_flow_plci[Ch] == plci->Id)) {
        if (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK) {
          dbug(3,dprintf ("XDI CAPI: I: pending N-XON Ch:%02x", Ch));
        }
        a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
      }
      nl_ind(plci);
      if ((e->RNR != 1) &&
          (a->ch_flow_plci[Ch] == plci->Id) &&
          (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK)) {
        a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
        dbug(3,dprintf ("XDI CAPI: I: remove faked N-XON Ch:%02x", Ch));
      }
    } else {
      sig_ind(plci);
    }
    e->Ind = 0;
  }

capi_callback_suffix:

  /* Drain the PLCI's incoming CAPI message ring while no request is in
   * flight and no internal command blocks processing. The ring uses a
   * write/read/wrap position triple over msg_in_queue. */
  while (!plci->req_in
   && !plci->internal_command
   && (plci->msg_in_write_pos != plci->msg_in_read_pos))
  {
    j = (plci->msg_in_read_pos == plci->msg_in_wrap_pos) ?
 = 0;
  dbug(1,dprintf("control_rc"));

  appl = plci->appl;
  a = plci->adapter;
  ncci = a->ch_ncci[ch];
  if(appl)
  {
    /* An application owns this PLCI: confirm the pending CAPI command
     * (plci->command) or advance the pending internal command. */
    Id = (((dword)(ncci ? ncci : ch)) << 16) | ((word)plci->Id << 8) | a->Id;
    if(plci->tel && plci->SuppState!=CALL_HELD) Id|=EXT_CONTROLLER;
    Number = plci->number;
    dbug(1,dprintf("Contr_RC-Id=%08lx,plci=%x,tel=%x, entity=0x%x, command=0x%x, int_command=0x%x",Id,plci->Id,plci->tel,plci->Sig.Id,plci->command,plci->internal_command));
    dbug(1,dprintf("channels=0x%x",plci->channels));
    if (plci_remove_check(plci))
      return;
    if(req==REMOVE && rc==ASSIGN_OK)
    {
      sig_req(plci,HANGUP,0);
      sig_req(plci,REMOVE,0);
      send_req(plci);
    }
    if(plci->command)
    {
      switch(plci->command)
      {
      case C_HOLD_REQ:
        dbug(1,dprintf("HoldRC=0x%x",rc));
        SSparms[1] = (byte)S_HOLD;
        if(rc!=OK)
        {
          plci->SuppState = IDLE;
          Info = 0x2001;
        }
        sendf(appl,_FACILITY_R|CONFIRM,Id,Number,"wws",Info,3,SSparms);
        break;

      case C_RETRIEVE_REQ:
        dbug(1,dprintf("RetrieveRC=0x%x",rc));
        SSparms[1] = (byte)S_RETRIEVE;
        if(rc!=OK)
        {
          plci->SuppState = CALL_HELD;
          Info = 0x2001;
        }
        sendf(appl,_FACILITY_R|CONFIRM,Id,Number,"wws",Info,3,SSparms);
        break;

      case _INFO_R:
        dbug(1,dprintf("InfoRC=0x%x",rc));
        if(rc!=OK) Info=_WRONG_STATE;
        sendf(appl,_INFO_R|CONFIRM,Id,Number,"w",Info);
        break;

      case _CONNECT_R:
        dbug(1,dprintf("Connect_R=0x%x/0x%x/0x%x/0x%x",req,rc,global_req,nl_rc));
        if (plci->State == INC_DIS_PENDING)
          break;
        if(plci->Sig.Id!=0xff)
        {
          /* Normal signalling PLCI: fail the connect on ASSIGN or
           * CALL_REQ errors, otherwise confirm and go outgoing-pending. */
          if (((global_req == ASSIGN) && (rc != ASSIGN_OK))
           || (!nl_rc && (req == CALL_REQ) && (rc != OK)))
          {
            dbug(1,dprintf("No more IDs/Call_Req failed"));
            sendf(appl,_CONNECT_R|CONFIRM,Id&0xffL,Number,"w",_OUT_OF_PLCI);
            plci_remove(plci);
            plci->State = IDLE;
            break;
          }
          if(plci->State!=LOCAL_CONNECT)plci->State = OUTG_CON_PENDING;
          sendf(appl,_CONNECT_R|CONFIRM,Id,Number,"w",0);
        }
        else /* D-ch activation */
        {
          if (rc != ASSIGN_OK)
          {
            dbug(1,dprintf("No more IDs/X.25 Call_Req failed"));
            sendf(appl,_CONNECT_R|CONFIRM,Id&0xffL,Number,"w",_OUT_OF_PLCI);
            plci_remove(plci);
            plci->State = IDLE;
            break;
          }
          sendf(appl,_CONNECT_R|CONFIRM,Id,Number,"w",0);
          sendf(plci->appl,_CONNECT_ACTIVE_I,Id,0,"sss","","","");
          plci->State = INC_ACT_PENDING;
        }
        break;

      case _CONNECT_I|RESPONSE:
        if (plci->State != INC_DIS_PENDING)
          plci->State = INC_CON_ACCEPT;
        break;

      case _DISCONNECT_R:
        if (plci->State == INC_DIS_PENDING)
          break;
        if(plci->Sig.Id!=0xff)
        {
          plci->State = OUTG_DIS_PENDING;
          sendf(appl,_DISCONNECT_R|CONFIRM,Id,Number,"w",0);
        }
        break;

      case SUSPEND_REQ:
        break;

      case RESUME_REQ:
        break;

      case _CONNECT_B3_R:
        if(rc!=OK)
        {
          sendf(appl,_CONNECT_B3_R|CONFIRM,Id,Number,"w",_WRONG_IDENTIFIER);
          break;
        }
        /* Allocate the NCCI for this channel and fold it into the Id. */
        ncci = get_ncci (plci, ch, 0);
        Id = (Id & 0xffff) | (((dword) ncci) << 16);
        plci->channels++;
        if(req==N_RESET)
        {
          a->ncci_state[ncci] = INC_ACT_PENDING;
          sendf(appl,_CONNECT_B3_R|CONFIRM,Id,Number,"w",0);
          sendf(appl,_CONNECT_B3_ACTIVE_I,Id,0,"s","");
        }
        else
        {
          a->ncci_state[ncci] = OUTG_CON_PENDING;
          sendf(appl,_CONNECT_B3_R|CONFIRM,Id,Number,"w",0);
        }
        break;

      case _CONNECT_B3_I|RESPONSE:
        break;

      case _RESET_B3_R:
/*        sendf(appl,_RESET_B3_R|CONFIRM,Id,Number,"w",0);*/
        break;

      case _DISCONNECT_B3_R:
        sendf(appl,_DISCONNECT_B3_R|CONFIRM,Id,Number,"w",0);
        break;

      case _MANUFACTURER_R:
        break;

      case PERM_LIST_REQ:
        if(rc!=OK)
        {
          Info = _WRONG_IDENTIFIER;
          sendf(plci->appl,_CONNECT_R|CONFIRM,Id,Number,"w",Info);
          plci_remove(plci);
        }
        else
          sendf(plci->appl,_CONNECT_R|CONFIRM,Id,Number,"w",Info);
        break;

      default:
        break;
      }
      plci->command = 0;
    }
    else if (plci->internal_command)
    {
      switch(plci->internal_command)
      {
      case BLOCK_PLCI:
        return;

      case GET_MWI_STATE:
        if(rc==OK) /* command supported, wait for indication */
        {
          return;
        }
        plci_remove(plci);
        break;

        /* Get Supported Services */
      case GETSERV_REQ_PEND:
        if(rc==OK) /* command supported, wait for indication */
        {
          break;
        }
        /* Not supported by the card: answer with the minimal mask. */
        PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
        sendf(appl, _FACILITY_R|CONFIRM, Id, Number, "wws",0,3,SSstruct);
        plci_remove(plci);
        break;

      case INTERR_DIVERSION_REQ_PEND:      /* Interrogate Parameters */
      case INTERR_NUMBERS_REQ_PEND:
      case CF_START_PEND:                  /* Call Forwarding Start pending */
      case CF_STOP_PEND:                   /* Call Forwarding Stop pending */
      case CCBS_REQUEST_REQ_PEND:
      case CCBS_DEACTIVATE_REQ_PEND:
      case CCBS_INTERROGATE_REQ_PEND:
        /* Map the pending command to its supplementary-service code. */
        switch(plci->internal_command)
        {
        case INTERR_DIVERSION_REQ_PEND:
          SSparms[1] = S_INTERROGATE_DIVERSION;
          break;
        case INTERR_NUMBERS_REQ_PEND:
          SSparms[1] = S_INTERROGATE_NUMBERS;
          break;
        case CF_START_PEND:
          SSparms[1] = S_CALL_FORWARDING_START;
          break;
        case CF_STOP_PEND:
          SSparms[1] = S_CALL_FORWARDING_STOP;
          break;
        case CCBS_REQUEST_REQ_PEND:
          SSparms[1] = S_CCBS_REQUEST;
          break;
        case CCBS_DEACTIVATE_REQ_PEND:
          SSparms[1] = S_CCBS_DEACTIVATE;
          break;
        case CCBS_INTERROGATE_REQ_PEND:
          SSparms[1] = S_CCBS_INTERROGATE;
          break;
        }
        if(global_req==ASSIGN)
        {
          dbug(1,dprintf("AssignDiversion_RC=0x%x/0x%x",req,rc));
          return;
        }
        if(!plci->appl) break;
        if(rc==ISDN_GUARD_REJ)
        {
          Info = _CAPI_GUARD_ERROR;
        }
        else if(rc!=OK)
        {
          Info = _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED;
        }
        sendf(plci->appl,_FACILITY_R|CONFIRM,Id&0x7,
              plci->number,"wws",Info,(word)3,SSparms);
        if(Info) plci_remove(plci);
        break;

        /* 3pty conference pending */
      case PTY_REQ_PEND:
        if(!plci->relatedPTYPLCI) break;
        rplci = plci->relatedPTYPLCI;
        SSparms[1] = plci->ptyState;
        rId = ((word)rplci->Id<<8)|rplci->adapter->Id;
        if(rplci->tel) rId|=EXT_CONTROLLER;
        if(rc!=OK)
        {
          Info = 0x300E; /* not supported */
          plci->relatedPTYPLCI = NULL;
          plci->ptyState = 0;
        }
        sendf(rplci->appl,
              _FACILITY_R|CONFIRM,
              rId,
              plci->number,
              "wws",Info,(word)3,SSparms);
        break;

        /* Explicit Call Transfer pending */
      case ECT_REQ_PEND:
        dbug(1,dprintf("ECT_RC=0x%x/0x%x",req,rc));
        if(!plci->relatedPTYPLCI) break;
        rplci = plci->relatedPTYPLCI;
        SSparms[1] = S_ECT;
        rId = ((word)rplci->Id<<8)|rplci->adapter->Id;
        if(rplci->tel) rId|=EXT_CONTROLLER;
        if(rc!=OK)
        {
          Info = 0x300E; /* not supported */
          plci->relatedPTYPLCI = NULL;
          plci->ptyState = 0;
        }
        sendf(rplci->appl,
              _FACILITY_R|CONFIRM,
              rId,
              plci->number,
              "wws",Info,(word)3,SSparms);
        break;

      case _MANUFACTURER_R:
        dbug(1,dprintf("_Manufacturer_R=0x%x/0x%x",req,rc));
        if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
        {
          dbug(1,dprintf("No more IDs"));
          sendf(appl,_MANUFACTURER_R|CONFIRM,Id,Number,"dww",_DI_MANU_ID,_MANUFACTURER_R,_OUT_OF_PLCI);
          plci_remove(plci);  /* after codec init, internal codec commands pending */
        }
        break;

      case _CONNECT_R:
        dbug(1,dprintf("_Connect_R=0x%x/0x%x",req,rc));
        if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
        {
          dbug(1,dprintf("No more IDs"));
          sendf(appl,_CONNECT_R|CONFIRM,Id&0xffL,Number,"w",_OUT_OF_PLCI);
          plci_remove(plci);  /* after codec init, internal codec commands pending */
        }
        break;

      case PERM_COD_HOOK:                     /* finished with Hook_Ind */
        return;

      case PERM_COD_CALL:
        dbug(1,dprintf("***Codec Connect_Pending A, Rc = 0x%x",rc));
        plci->internal_command = PERM_COD_CONN_PEND;
        return;

      case PERM_COD_ASSIGN:
        dbug(1,dprintf("***Codec Assign A, Rc = 0x%x",rc));
        if(rc!=ASSIGN_OK) break;
        sig_req(plci,CALL_REQ,0);
        send_req(plci);
        plci->internal_command = PERM_COD_CALL;
        return;

        /* Null Call Reference Request pending */
      case C_NCR_FAC_REQ:
        dbug(1,dprintf("NCR_FAC=0x%x/0x%x",req,rc));
        if(global_req==ASSIGN)
        {
          if(rc==ASSIGN_OK)
          {
            return;
          }
          else
          {
            sendf(appl,_INFO_R|CONFIRM,Id&0xf,Number,"w",_WRONG_STATE);
            appl->NullCREnable = false;
            plci_remove(plci);
          }
        }
        else if(req==NCR_FACILITY)
        {
          if(rc==OK)
          {
            sendf(appl,_INFO_R|CONFIRM,Id&0xf,Number,"w",0);
          }
          else
          {
            sendf(appl,_INFO_R|CONFIRM,Id&0xf,Number,"w",_WRONG_STATE);
            appl->NullCREnable = false;
          }
          plci_remove(plci);
        }
        break;

      case HOOK_ON_REQ:
        if(plci->channels)
        {
          /* Still connected channels: start their disconnect first. */
          if(a->ncci_state[ncci]==CONNECTED)
          {
            a->ncci_state[ncci] = OUTG_DIS_PENDING;
            cleanup_ncci_data (plci, ncci);
            nl_req_ncci(plci,N_DISC,(byte)ncci);
          }
          break;
        }
        break;

      case HOOK_OFF_REQ:
        if (plci->State == INC_DIS_PENDING)
          break;
        sig_req(plci,CALL_REQ,0);
        send_req(plci);
        plci->State=OUTG_CON_PENDING;
        break;

      case MWI_ACTIVATE_REQ_PEND:
      case MWI_DEACTIVATE_REQ_PEND:
        if(global_req == ASSIGN && rc==ASSIGN_OK)
        {
          dbug(1,dprintf("MWI_REQ assigned"));
          return;
        }
        else if(rc!=OK)
        {
          if(rc==WRONG_IE)
          {
            Info = 0x2007; /* Illegal message parameter coding */
            dbug(1,dprintf("MWI_REQ invalid parameter"));
          }
          else
          {
            Info = 0x300B; /* not supported */
            dbug(1,dprintf("MWI_REQ not supported"));
          }
          /* 0x3010: Request not allowed in this state */
          PUT_WORD(&SSparms[4],0x300E); /* SS not supported */
        }
        if(plci->internal_command==MWI_ACTIVATE_REQ_PEND)
        {
          PUT_WORD(&SSparms[1],S_MWI_ACTIVATE);
        }
        else PUT_WORD(&SSparms[1],S_MWI_DEACTIVATE);

        if(plci->cr_enquiry)
        {
          sendf(plci->appl,
                _FACILITY_R|CONFIRM,
                Id&0xf,
                plci->number,
                "wws",Info,(word)3,SSparms);
          if(rc!=OK) plci_remove(plci);
        }
        else
        {
          sendf(plci->appl,
                _FACILITY_R|CONFIRM,
                Id,
                plci->number,
                "wws",Info,(word)3,SSparms);
        }
        break;

      case CONF_BEGIN_REQ_PEND:
      case CONF_ADD_REQ_PEND:
      case CONF_SPLIT_REQ_PEND:
      case CONF_DROP_REQ_PEND:
      case CONF_ISOLATE_REQ_PEND:
      case CONF_REATTACH_REQ_PEND:
        dbug(1,dprintf("CONF_RC=0x%x/0x%x",req,rc));
        if((plci->internal_command==CONF_ADD_REQ_PEND)&&(!plci->relatedPTYPLCI)) break;
        rplci = plci;
        rId = Id;
        /* CONF_ADD answers on the related PLCI; the rest on this one. */
        switch(plci->internal_command)
        {
        case CONF_BEGIN_REQ_PEND:
          SSparms[1] = S_CONF_BEGIN;
          break;
        case CONF_ADD_REQ_PEND:
          SSparms[1] = S_CONF_ADD;
          rplci = plci->relatedPTYPLCI;
          rId = ((word)rplci->Id<<8)|rplci->adapter->Id;
          break;
        case CONF_SPLIT_REQ_PEND:
          SSparms[1] = S_CONF_SPLIT;
          break;
        case CONF_DROP_REQ_PEND:
          SSparms[1] = S_CONF_DROP;
          break;
        case CONF_ISOLATE_REQ_PEND:
          SSparms[1] = S_CONF_ISOLATE;
          break;
        case CONF_REATTACH_REQ_PEND:
          SSparms[1] = S_CONF_REATTACH;
          break;
        }
        if(rc!=OK)
        {
          Info = 0x300E; /* not supported */
          plci->relatedPTYPLCI = NULL;
          plci->ptyState = 0;
        }
        sendf(rplci->appl,
              _FACILITY_R|CONFIRM,
              rId,
              plci->number,
              "wws",Info,(word)3,SSparms);
        break;

      case VSWITCH_REQ_PEND:
        if(rc!=OK)
        {
          /* Reset vswitch state on both partners on failure. */
          if(plci->relatedPTYPLCI)
          {
            plci->relatedPTYPLCI->vswitchstate=0;
            plci->relatedPTYPLCI->vsprot=0;
            plci->relatedPTYPLCI->vsprotdialect=0;
          }
          plci->vswitchstate=0;
          plci->vsprot=0;
          plci->vsprotdialect=0;
        }
        else
        {
          if(plci->relatedPTYPLCI &&
             plci->vswitchstate==1 &&
ASSIGN_OK)
      {
        plci->internal_command = 0;
        dbug(1,dprintf("ListenCheck, new SIG_ID = 0x%x",plci->Sig.Id));
        add_p(plci,ESC,"\x02\x18\x00");             /* support call waiting */
        sig_req(plci,INDICATE_REQ,0);
        send_req(plci);
      }
      else
      {
        dbug(1,dprintf("ListenCheck failed (assignRc=0x%x)",rc));
        a->listen_active--;
        plci_remove(plci);
        plci->State = IDLE;
      }
      break;

    case USELAW_REQ:
      /* Automatic-law probe: the ASSIGN result decides whether the
       * LAW_REQ can be issued; a->automatic_law tracks the outcome
       * (2 = initiated, 3 = failed/unsupported). */
      if(global_req == ASSIGN)
      {
        if (rc==ASSIGN_OK)
        {
          sig_req(plci,LAW_REQ,0);
          send_req(plci);
          dbug(1,dprintf("Auto-Law assigned"));
        }
        else
        {
          dbug(1,dprintf("Auto-Law assign failed"));
          a->automatic_law = 3;
          plci->internal_command = 0;
          a->automatic_lawPLCI = NULL;
        }
        break;
      }
      else if(req == LAW_REQ && rc==OK)
      {
        dbug(1,dprintf("Auto-Law initiated"));
        a->automatic_law = 2;
        plci->internal_command = 0;
      }
      else
      {
        dbug(1,dprintf("Auto-Law not supported"));
        a->automatic_law = 3;
        plci->internal_command = 0;
        sig_req(plci,REMOVE,0);
        send_req(plci);
        a->automatic_lawPLCI = NULL;
      }
      break;
    }
    plci_remove_check(plci);
  }
}


/* Confirm a completed DATA_B3_R transmit for the given channel: free the
 * transmit buffer, pop the oldest pending descriptor from the NCCI data
 * ring and send _DATA_B3_R|CONFIRM to the application (unless the
 * descriptor is flagged 0x04 or the NCCI is already down). */
static void data_rc(PLCI *plci, byte ch)
{
  dword Id;
  DIVA_CAPI_ADAPTER * a;
  NCCI *ncci_ptr;
  DATA_B3_DESC *data;
  word ncci;

  if (plci->appl)
  {
    TransmitBufferFree (plci->appl, plci->data_sent_ptr);
    a = plci->adapter;
    ncci = a->ch_ncci[ch];
    if (ncci && (a->ncci_plci[ncci] == plci->Id))
    {
      ncci_ptr = &(a->ncci[ncci]);
      dbug(1,dprintf("data_out=%d, data_pending=%d",ncci_ptr->data_out,ncci_ptr->data_pending));
      if (ncci_ptr->data_pending)
      {
        data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
        if (!(data->Flags &4) && a->ncci_state[ncci])
        {
          Id = (((dword)ncci)<<16)|((word)plci->Id<<8)|a->Id;
          if(plci->tel) Id|=EXT_CONTROLLER;
          sendf(plci->appl,_DATA_B3_R|CONFIRM,Id,data->Number,
                "ww",data->Handle,0);
        }
        /* Advance the circular data-descriptor ring. */
        (ncci_ptr->data_out)++;
        if (ncci_ptr->data_out == MAX_DATA_B3)
          ncci_ptr->data_out = 0;
        (ncci_ptr->data_pending)--;
      }
    }
  }
}


/* Acknowledge delivered data for the given channel: pop the oldest entry
 * from the NCCI's data-ack ring and confirm it to the application. */
static void data_ack(PLCI *plci, byte ch)
{
  dword Id;
  DIVA_CAPI_ADAPTER * a;
  NCCI *ncci_ptr;
  word ncci;

  a = plci->adapter;
  ncci = a->ch_ncci[ch];
  ncci_ptr = &(a->ncci[ncci]);
  if (ncci_ptr->data_ack_pending)
  {
    if
(a->ncci_state[ncci] && (a->ncci_plci[ncci] == plci->Id)) { Id = (((dword)ncci)<<16)|((word)plci->Id<<8)|a->Id; if(plci->tel) Id|=EXT_CONTROLLER; sendf(plci->appl,_DATA_B3_R|CONFIRM,Id,ncci_ptr->DataAck[ncci_ptr->data_ack_out].Number, "ww",ncci_ptr->DataAck[ncci_ptr->data_ack_out].Handle,0); } (ncci_ptr->data_ack_out)++; if (ncci_ptr->data_ack_out == MAX_DATA_ACK) ncci_ptr->data_ack_out = 0; (ncci_ptr->data_ack_pending)--; } } static void sig_ind(PLCI *plci) { dword x_Id; dword Id; dword rId; word Number = 0; word i; word cip; dword cip_mask; byte *ie; DIVA_CAPI_ADAPTER * a; API_PARSE saved_parms[MAX_MSG_PARMS+1]; #define MAXPARMSIDS 31 byte * parms[MAXPARMSIDS]; byte * add_i[4]; byte * multi_fac_parms[MAX_MULTI_IE]; byte * multi_pi_parms [MAX_MULTI_IE]; byte * multi_ssext_parms [MAX_MULTI_IE]; byte * multi_CiPN_parms [MAX_MULTI_IE]; byte * multi_vswitch_parms [MAX_MULTI_IE]; byte ai_len; byte *esc_chi = ""; byte *esc_law = ""; byte *pty_cai = ""; byte *esc_cr = ""; byte *esc_profile = ""; byte facility[256]; PLCI * tplci = NULL; byte chi[] = "\x02\x18\x01"; byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08"; byte resume_cau[] = "\x05\x05\x00\x02\x00\x00"; /* ESC_MSGTYPE must be the last but one message, a new IE has to be */ /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */ /* SMSG is situated at the end because its 0 (for compatibility reasons */ /* (see Info_Mask Bit 4, first IE. 
then the message type) */ word parms_id[] = {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA, UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW, RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR, CST, ESC_PROFILE, 0xff, ESC_MSGTYPE, SMSG}; /* 14 FTY repl by ESC_CHI */ /* 18 PI repl by ESC_LAW */ /* removed OAD changed to 0xff for future use, OAD is multiIE now */ word multi_fac_id[] = {1, FTY}; word multi_pi_id[] = {1, PI}; word multi_CiPN_id[] = {1, OAD}; word multi_ssext_id[] = {1, ESC_SSEXT}; word multi_vswitch_id[] = {1, ESC_VSWITCH}; byte * cau; word ncci; byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; byte force_mt_info = false; byte dir; dword d; word w; a = plci->adapter; Id = ((word)plci->Id<<8)|a->Id; PUT_WORD(&SS_Ind[4],0x0000); if (plci->sig_remove_id) { plci->Sig.RNR = 2; /* discard */ dbug(1,dprintf("SIG discard while remove pending")); return; } if(plci->tel && plci->SuppState!=CALL_HELD) Id|=EXT_CONTROLLER; dbug(1,dprintf("SigInd-Id=%08lx,plci=%x,tel=%x,state=0x%x,channels=%d,Discflowcl=%d", Id,plci->Id,plci->tel,plci->State,plci->channels,plci->hangup_flow_ctrl_timer)); if(plci->Sig.Ind==CALL_HOLD_ACK && plci->channels) { plci->Sig.RNR = 1; return; } if(plci->Sig.Ind==HANGUP && plci->channels) { plci->Sig.RNR = 1; plci->hangup_flow_ctrl_timer++; /* recover the network layer after timeout */ if(plci->hangup_flow_ctrl_timer==100) { dbug(1,dprintf("Exceptional disc")); plci->Sig.RNR = 0; plci->hangup_flow_ctrl_timer = 0; for (ncci = 1; ncci < MAX_NCCI+1; ncci++) { if (a->ncci_plci[ncci] == plci->Id) { cleanup_ncci_data (plci, ncci); if(plci->channels)plci->channels--; if (plci->appl) sendf(plci->appl,_DISCONNECT_B3_I, (((dword) ncci) << 16) | Id,0,"ws",0,""); } } if (plci->appl) sendf(plci->appl, 
_DISCONNECT_I, Id, 0, "w", 0); plci_remove(plci); plci->State=IDLE; } return; } /* do first parse the info with no OAD in, because OAD will be converted */ /* first the multiple facility IE, then mult. progress ind. */ /* then the parameters for the info_ind + conn_ind */ IndParse(plci,multi_fac_id,multi_fac_parms,MAX_MULTI_IE); IndParse(plci,multi_pi_id,multi_pi_parms,MAX_MULTI_IE); IndParse(plci,multi_ssext_id,multi_ssext_parms,MAX_MULTI_IE); IndParse(plci,multi_vswitch_id,multi_vswitch_parms,MAX_MULTI_IE); IndParse(plci,parms_id,parms,0); IndParse(plci,multi_CiPN_id,multi_CiPN_parms,MAX_MULTI_IE); esc_chi = parms[14]; esc_law = parms[18]; pty_cai = parms[24]; esc_cr = parms[25]; esc_profile = parms[27]; if(esc_cr[0] && plci) { if(plci->cr_enquiry && plci->appl) { plci->cr_enquiry = false; /* d = MANU_ID */ /* w = m_command */ /* b = total length */ /* b = indication type */ /* b = length of all IEs */ /* b = IE1 */ /* S = IE1 length + cont. */ /* b = IE2 */ /* S = IE2 length + cont. */ sendf(plci->appl, _MANUFACTURER_I, Id, 0, "dwbbbbSbS",_DI_MANU_ID,plci->m_command, 2+1+1+esc_cr[0]+1+1+esc_law[0],plci->Sig.Ind,1+1+esc_cr[0]+1+1+esc_law[0],ESC,esc_cr,ESC,esc_law); } } /* create the additional info structure */ add_i[1] = parms[15]; /* KEY of additional info */ add_i[2] = parms[11]; /* UUI of additional info */ ai_len = AddInfo(add_i,multi_fac_parms, esc_chi, facility); /* the ESC_LAW indicates if u-Law or a-Law is actually used by the card */ /* indication returns by the card if requested by the function */ /* AutomaticLaw() after driver init */ if (a->automatic_law<4) { if(esc_law[0]){ if(esc_law[2]){ dbug(0,dprintf("u-Law selected")); a->u_law = 1; } else { dbug(0,dprintf("a-Law selected")); a->u_law = 0; } a->automatic_law = 4; if(plci==a->automatic_lawPLCI) { plci->internal_command = 0; sig_req(plci,REMOVE,0); send_req(plci); a->automatic_lawPLCI = NULL; } } if (esc_profile[0]) { dbug (1, dprintf ("[%06x] CardProfile: %lx %lx %lx %lx %lx", UnMapController 
(a->Id), GET_DWORD (&esc_profile[6]), GET_DWORD (&esc_profile[10]), GET_DWORD (&esc_profile[14]), GET_DWORD (&esc_profile[18]), GET_DWORD (&esc_profile[46]))); a->profile.Global_Options &= 0x000000ffL; a->profile.B1_Protocols &= 0x000003ffL; a->profile.B2_Protocols &= 0x00001fdfL; a->profile.B3_Protocols &= 0x000000b7L; a->profile.Global_Options &= GET_DWORD (&esc_profile[6]) | GL_BCHANNEL_OPERATION_SUPPORTED; a->profile.B1_Protocols &= GET_DWORD (&esc_profile[10]); a->profile.B2_Protocols &= GET_DWORD (&esc_profile[14]); a->profile.B3_Protocols &= GET_DWORD (&esc_profile[18]); a->manufacturer_features = GET_DWORD (&esc_profile[46]); a->man_profile.private_options = 0; if (a->manufacturer_features & MANUFACTURER_FEATURE_ECHO_CANCELLER) { a->man_profile.private_options |= 1L << PRIVATE_ECHO_CANCELLER; a->profile.Global_Options |= GL_ECHO_CANCELLER_SUPPORTED; } if (a->manufacturer_features & MANUFACTURER_FEATURE_RTP) a->man_profile.private_options |= 1L << PRIVATE_RTP; a->man_profile.rtp_primary_payloads = GET_DWORD (&esc_profile[50]); a->man_profile.rtp_additional_payloads = GET_DWORD (&esc_profile[54]); if (a->manufacturer_features & MANUFACTURER_FEATURE_T38) a->man_profile.private_options |= 1L << PRIVATE_T38; if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD) a->man_profile.private_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD; if (a->manufacturer_features & MANUFACTURER_FEATURE_V18) a->man_profile.private_options |= 1L << PRIVATE_V18; if (a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_TONE) a->man_profile.private_options |= 1L << PRIVATE_DTMF_TONE; if (a->manufacturer_features & MANUFACTURER_FEATURE_PIAFS) a->man_profile.private_options |= 1L << PRIVATE_PIAFS; if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS) a->man_profile.private_options |= 1L << PRIVATE_FAX_PAPER_FORMATS; if (a->manufacturer_features & MANUFACTURER_FEATURE_VOWN) a->man_profile.private_options |= 1L << PRIVATE_VOWN; if (a->manufacturer_features & 
MANUFACTURER_FEATURE_FAX_NONSTANDARD) a->man_profile.private_options |= 1L << PRIVATE_FAX_NONSTANDARD; } else { a->profile.Global_Options &= 0x0000007fL; a->profile.B1_Protocols &= 0x000003dfL; a->profile.B2_Protocols &= 0x00001adfL; a->profile.B3_Protocols &= 0x000000b7L; a->manufacturer_features &= MANUFACTURER_FEATURE_HARDDTMF; } if (a->manufacturer_features & (MANUFACTURER_FEATURE_HARDDTMF | MANUFACTURER_FEATURE_SOFTDTMF_SEND | MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)) { a->profile.Global_Options |= GL_DTMF_SUPPORTED; } a->manufacturer_features &= ~MANUFACTURER_FEATURE_OOB_CHANNEL; dbug (1, dprintf ("[%06x] Profile: %lx %lx %lx %lx %lx", UnMapController (a->Id), a->profile.Global_Options, a->profile.B1_Protocols, a->profile.B2_Protocols, a->profile.B3_Protocols, a->manufacturer_features)); } /* codec plci for the handset/hook state support is just an internal id */ if(plci!=a->AdvCodecPLCI) { force_mt_info = SendMultiIE(plci,Id,multi_fac_parms, FTY, 0x20, 0); force_mt_info |= SendMultiIE(plci,Id,multi_pi_parms, PI, 0x210, 0); SendSSExtInd(NULL,plci,Id,multi_ssext_parms); SendInfo(plci,Id, parms, force_mt_info); VSwitchReqInd(plci,Id,multi_vswitch_parms); } /* switch the codec to the b-channel */ if(esc_chi[0] && plci && !plci->SuppState){ plci->b_channel = esc_chi[esc_chi[0]]&0x1f; mixer_set_bchannel_id_esc (plci, plci->b_channel); dbug(1,dprintf("storeChannel=0x%x",plci->b_channel)); if(plci->tel==ADV_VOICE && plci->appl) { SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a); } } if(plci->appl) Number = plci->appl->Number++; switch(plci->Sig.Ind) { /* Response to Get_Supported_Services request */ case S_SUPPORTED: dbug(1,dprintf("S_Supported")); if(!plci->appl) break; if(pty_cai[0]==4) { PUT_DWORD(&CF_Ind[6],GET_DWORD(&pty_cai[1]) ); } else { PUT_DWORD(&CF_Ind[6],MASK_TERMINAL_PORTABILITY | MASK_HOLD_RETRIEVE); } PUT_WORD (&CF_Ind[1], 0); PUT_WORD (&CF_Ind[4], 0); sendf(plci->appl,_FACILITY_R|CONFIRM,Id&0x7,plci->number, "wws",0,3,CF_Ind); plci_remove(plci); break; /* 
Supplementary Service rejected */ case S_SERVICE_REJ: dbug(1,dprintf("S_Reject=0x%x",pty_cai[5])); if(!pty_cai[0]) break; switch (pty_cai[5]) { case ECT_EXECUTE: case THREE_PTY_END: case THREE_PTY_BEGIN: if(!plci->relatedPTYPLCI) break; tplci = plci->relatedPTYPLCI; rId = ( (word)tplci->Id<<8)|tplci->adapter->Id; if(tplci->tel) rId|=EXT_CONTROLLER; if(pty_cai[5]==ECT_EXECUTE) { PUT_WORD(&SS_Ind[1],S_ECT); plci->vswitchstate=0; plci->relatedPTYPLCI->vswitchstate=0; } else { PUT_WORD(&SS_Ind[1],pty_cai[5]+3); } if(pty_cai[2]!=0xff) { PUT_WORD(&SS_Ind[4],0x3600|(word)pty_cai[2]); } else { PUT_WORD(&SS_Ind[4],0x300E); } plci->relatedPTYPLCI = NULL; plci->ptyState = 0; sendf(tplci->appl,_FACILITY_I,rId,0,"ws",3, SS_Ind); break; case CALL_DEFLECTION: if(pty_cai[2]!=0xff) { PUT_WORD(&SS_Ind[4],0x3600|(word)pty_cai[2]); } else { PUT_WORD(&SS_Ind[4],0x300E); } PUT_WORD(&SS_Ind[1],pty_cai[5]); for(i=0; i<max_appl; i++) { if(application[i].CDEnable) { if(application[i].Id) sendf(&application[i],_FACILITY_I,Id,0,"ws",3, SS_Ind); application[i].CDEnable = false; } } break; case DEACTIVATION_DIVERSION: case ACTIVATION_DIVERSION: case DIVERSION_INTERROGATE_CFU: case DIVERSION_INTERROGATE_CFB: case DIVERSION_INTERROGATE_CFNR: case DIVERSION_INTERROGATE_NUM: case CCBS_REQUEST: case CCBS_DEACTIVATE: case CCBS_INTERROGATE: if(!plci->appl) break; if(pty_cai[2]!=0xff) { PUT_WORD(&Interr_Err_Ind[4],0x3600|(word)pty_cai[2]); } else { PUT_WORD(&Interr_Err_Ind[4],0x300E); } switch (pty_cai[5]) { case DEACTIVATION_DIVERSION: dbug(1,dprintf("Deact_Div")); Interr_Err_Ind[0]=0x9; Interr_Err_Ind[3]=0x6; PUT_WORD(&Interr_Err_Ind[1],S_CALL_FORWARDING_STOP); break; case ACTIVATION_DIVERSION: dbug(1,dprintf("Act_Div")); Interr_Err_Ind[0]=0x9; Interr_Err_Ind[3]=0x6; PUT_WORD(&Interr_Err_Ind[1],S_CALL_FORWARDING_START); break; case DIVERSION_INTERROGATE_CFU: case DIVERSION_INTERROGATE_CFB: case DIVERSION_INTERROGATE_CFNR: dbug(1,dprintf("Interr_Div")); Interr_Err_Ind[0]=0xa; Interr_Err_Ind[3]=0x7; 
PUT_WORD(&Interr_Err_Ind[1],S_INTERROGATE_DIVERSION); break; case DIVERSION_INTERROGATE_NUM: dbug(1,dprintf("Interr_Num")); Interr_Err_Ind[0]=0xa; Interr_Err_Ind[3]=0x7; PUT_WORD(&Interr_Err_Ind[1],S_INTERROGATE_NUMBERS); break; case CCBS_REQUEST: dbug(1,dprintf("CCBS Request")); Interr_Err_Ind[0]=0xd; Interr_Err_Ind[3]=0xa; PUT_WORD(&Interr_Err_Ind[1],S_CCBS_REQUEST); break; case CCBS_DEACTIVATE: dbug(1,dprintf("CCBS Deactivate")); Interr_Err_Ind[0]=0x9; Interr_Err_Ind[3]=0x6; PUT_WORD(&Interr_Err_Ind[1],S_CCBS_DEACTIVATE); break; case CCBS_INTERROGATE: dbug(1,dprintf("CCBS Interrogate")); Interr_Err_Ind[0]=0xb; Interr_Err_Ind[3]=0x8; PUT_WORD(&Interr_Err_Ind[1],S_CCBS_INTERROGATE); break; } PUT_DWORD(&Interr_Err_Ind[6],plci->appl->S_Handle); sendf(plci->appl,_FACILITY_I,Id&0x7,0,"ws",3, Interr_Err_Ind); plci_remove(plci); break; case ACTIVATION_MWI: case DEACTIVATION_MWI: if(pty_cai[5]==ACTIVATION_MWI) { PUT_WORD(&SS_Ind[1],S_MWI_ACTIVATE); } else PUT_WORD(&SS_Ind[1],S_MWI_DEACTIVATE); if(pty_cai[2]!=0xff) { PUT_WORD(&SS_Ind[4],0x3600|(word)pty_cai[2]); } else { PUT_WORD(&SS_Ind[4],0x300E); } if(plci->cr_enquiry) { sendf(plci->appl,_FACILITY_I,Id&0xf,0,"ws",3, SS_Ind); plci_remove(plci); } else { sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, SS_Ind); } break; case CONF_ADD: /* ERROR */ case CONF_BEGIN: case CONF_DROP: case CONF_ISOLATE: case CONF_REATTACH: CONF_Ind[0]=9; CONF_Ind[3]=6; switch(pty_cai[5]) { case CONF_BEGIN: PUT_WORD(&CONF_Ind[1],S_CONF_BEGIN); plci->ptyState = 0; break; case CONF_DROP: CONF_Ind[0]=5; CONF_Ind[3]=2; PUT_WORD(&CONF_Ind[1],S_CONF_DROP); plci->ptyState = CONNECTED; break; case CONF_ISOLATE: CONF_Ind[0]=5; CONF_Ind[3]=2; PUT_WORD(&CONF_Ind[1],S_CONF_ISOLATE); plci->ptyState = CONNECTED; break; case CONF_REATTACH: CONF_Ind[0]=5; CONF_Ind[3]=2; PUT_WORD(&CONF_Ind[1],S_CONF_REATTACH); plci->ptyState = CONNECTED; break; case CONF_ADD: PUT_WORD(&CONF_Ind[1],S_CONF_ADD); plci->relatedPTYPLCI = NULL; tplci=plci->relatedPTYPLCI; if(tplci) 
tplci->ptyState = CONNECTED; plci->ptyState = CONNECTED; break; } if(pty_cai[2]!=0xff) { PUT_WORD(&CONF_Ind[4],0x3600|(word)pty_cai[2]); } else { PUT_WORD(&CONF_Ind[4],0x3303); /* Time-out: network did not respond within the required time */ } PUT_DWORD(&CONF_Ind[6],0x0); sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, CONF_Ind); break; } break; /* Supplementary Service indicates success */ case S_SERVICE: dbug(1,dprintf("Service_Ind")); PUT_WORD (&CF_Ind[4], 0); switch (pty_cai[5]) { case THREE_PTY_END: case THREE_PTY_BEGIN: case ECT_EXECUTE: if(!plci->relatedPTYPLCI) break; tplci = plci->relatedPTYPLCI; rId = ( (word)tplci->Id<<8)|tplci->adapter->Id; if(tplci->tel) rId|=EXT_CONTROLLER; if(pty_cai[5]==ECT_EXECUTE) { PUT_WORD(&SS_Ind[1],S_ECT); if(plci->vswitchstate!=3) { plci->ptyState = IDLE; plci->relatedPTYPLCI = NULL; plci->ptyState = 0; } dbug(1,dprintf("ECT OK")); sendf(tplci->appl,_FACILITY_I,rId,0,"ws",3, SS_Ind); } else { switch (plci->ptyState) { case S_3PTY_BEGIN: plci->ptyState = CONNECTED; dbug(1,dprintf("3PTY ON")); break; case S_3PTY_END: plci->ptyState = IDLE; plci->relatedPTYPLCI = NULL; plci->ptyState = 0; dbug(1,dprintf("3PTY OFF")); break; } PUT_WORD(&SS_Ind[1],pty_cai[5]+3); sendf(tplci->appl,_FACILITY_I,rId,0,"ws",3, SS_Ind); } break; case CALL_DEFLECTION: PUT_WORD(&SS_Ind[1],pty_cai[5]); for(i=0; i<max_appl; i++) { if(application[i].CDEnable) { if(application[i].Id) sendf(&application[i],_FACILITY_I,Id,0,"ws",3, SS_Ind); application[i].CDEnable = false; } } break; case DEACTIVATION_DIVERSION: case ACTIVATION_DIVERSION: if(!plci->appl) break; PUT_WORD(&CF_Ind[1],pty_cai[5]+2); PUT_DWORD(&CF_Ind[6],plci->appl->S_Handle); sendf(plci->appl,_FACILITY_I,Id&0x7,0,"ws",3, CF_Ind); plci_remove(plci); break; case DIVERSION_INTERROGATE_CFU: case DIVERSION_INTERROGATE_CFB: case DIVERSION_INTERROGATE_CFNR: case DIVERSION_INTERROGATE_NUM: case CCBS_REQUEST: case CCBS_DEACTIVATE: case CCBS_INTERROGATE: if(!plci->appl) break; switch (pty_cai[5]) { case 
DIVERSION_INTERROGATE_CFU: case DIVERSION_INTERROGATE_CFB: case DIVERSION_INTERROGATE_CFNR: dbug(1,dprintf("Interr_Div")); PUT_WORD(&pty_cai[1],S_INTERROGATE_DIVERSION); pty_cai[3]=pty_cai[0]-3; /* Supplementary Service-specific parameter len */ break; case DIVERSION_INTERROGATE_NUM: dbug(1,dprintf("Interr_Num")); PUT_WORD(&pty_cai[1],S_INTERROGATE_NUMBERS); pty_cai[3]=pty_cai[0]-3; /* Supplementary Service-specific parameter len */ break; case CCBS_REQUEST: dbug(1,dprintf("CCBS Request")); PUT_WORD(&pty_cai[1],S_CCBS_REQUEST); pty_cai[3]=pty_cai[0]-3; /* Supplementary Service-specific parameter len */ break; case CCBS_DEACTIVATE: dbug(1,dprintf("CCBS Deactivate")); PUT_WORD(&pty_cai[1],S_CCBS_DEACTIVATE); pty_cai[3]=pty_cai[0]-3; /* Supplementary Service-specific parameter len */ break; case CCBS_INTERROGATE: dbug(1,dprintf("CCBS Interrogate")); PUT_WORD(&pty_cai[1],S_CCBS_INTERROGATE); pty_cai[3]=pty_cai[0]-3; /* Supplementary Service-specific parameter len */ break; } PUT_WORD(&pty_cai[4],0); /* Supplementary Service Reason */ PUT_DWORD(&pty_cai[6],plci->appl->S_Handle); sendf(plci->appl,_FACILITY_I,Id&0x7,0,"wS",3, pty_cai); plci_remove(plci); break; case ACTIVATION_MWI: case DEACTIVATION_MWI: if(pty_cai[5]==ACTIVATION_MWI) { PUT_WORD(&SS_Ind[1],S_MWI_ACTIVATE); } else PUT_WORD(&SS_Ind[1],S_MWI_DEACTIVATE); if(plci->cr_enquiry) { sendf(plci->appl,_FACILITY_I,Id&0xf,0,"ws",3, SS_Ind); plci_remove(plci); } else { sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, SS_Ind); } break; case MWI_INDICATION: if(pty_cai[0]>=0x12) { PUT_WORD(&pty_cai[3],S_MWI_INDICATE); pty_cai[2]=pty_cai[0]-2; /* len Parameter */ pty_cai[5]=pty_cai[0]-5; /* Supplementary Service-specific parameter len */ if(plci->appl && (a->Notification_Mask[plci->appl->Id-1]&SMASK_MWI)) { if(plci->internal_command==GET_MWI_STATE) /* result on Message Waiting Listen */ { sendf(plci->appl,_FACILITY_I,Id&0xf,0,"wS",3, &pty_cai[2]); plci_remove(plci); return; } else sendf(plci->appl,_FACILITY_I,Id,0,"wS",3, 
&pty_cai[2]); pty_cai[0]=0; } else { for(i=0; i<max_appl; i++) { if(a->Notification_Mask[i]&SMASK_MWI) { sendf(&application[i],_FACILITY_I,Id&0x7,0,"wS",3, &pty_cai[2]); pty_cai[0]=0; } } } if(!pty_cai[0]) { /* acknowledge */ facility[2]= 0; /* returncode */ } else facility[2]= 0xff; } else { /* reject */ facility[2]= 0xff; /* returncode */ } facility[0]= 2; facility[1]= MWI_RESPONSE; /* Function */ add_p(plci,CAI,facility); add_p(plci,ESC,multi_ssext_parms[0]); /* remembered parameter -> only one possible */ sig_req(plci,S_SERVICE,0); send_req(plci); plci->command = 0; next_internal_command (Id, plci); break; case CONF_ADD: /* OK */ case CONF_BEGIN: case CONF_DROP: case CONF_ISOLATE: case CONF_REATTACH: case CONF_PARTYDISC: CONF_Ind[0]=9; CONF_Ind[3]=6; switch(pty_cai[5]) { case CONF_BEGIN: PUT_WORD(&CONF_Ind[1],S_CONF_BEGIN); if(pty_cai[0]==6) { d=pty_cai[6]; PUT_DWORD(&CONF_Ind[6],d); /* PartyID */ } else { PUT_DWORD(&CONF_Ind[6],0x0); } break; case CONF_ISOLATE: PUT_WORD(&CONF_Ind[1],S_CONF_ISOLATE); CONF_Ind[0]=5; CONF_Ind[3]=2; break; case CONF_REATTACH: PUT_WORD(&CONF_Ind[1],S_CONF_REATTACH); CONF_Ind[0]=5; CONF_Ind[3]=2; break; case CONF_DROP: PUT_WORD(&CONF_Ind[1],S_CONF_DROP); CONF_Ind[0]=5; CONF_Ind[3]=2; break; case CONF_ADD: PUT_WORD(&CONF_Ind[1],S_CONF_ADD); d=pty_cai[6]; PUT_DWORD(&CONF_Ind[6],d); /* PartyID */ tplci=plci->relatedPTYPLCI; if(tplci) tplci->ptyState = CONNECTED; break; case CONF_PARTYDISC: CONF_Ind[0]=7; CONF_Ind[3]=4; PUT_WORD(&CONF_Ind[1],S_CONF_PARTYDISC); d=pty_cai[6]; PUT_DWORD(&CONF_Ind[4],d); /* PartyID */ break; } plci->ptyState = CONNECTED; sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, CONF_Ind); break; case CCBS_INFO_RETAIN: case CCBS_ERASECALLLINKAGEID: case CCBS_STOP_ALERTING: CONF_Ind[0]=5; CONF_Ind[3]=2; switch(pty_cai[5]) { case CCBS_INFO_RETAIN: PUT_WORD(&CONF_Ind[1],S_CCBS_INFO_RETAIN); break; case CCBS_STOP_ALERTING: PUT_WORD(&CONF_Ind[1],S_CCBS_STOP_ALERTING); break; case CCBS_ERASECALLLINKAGEID: 
PUT_WORD(&CONF_Ind[1],S_CCBS_ERASECALLLINKAGEID); CONF_Ind[0]=7; CONF_Ind[3]=4; CONF_Ind[6]=0; CONF_Ind[7]=0; break; } w=pty_cai[6]; PUT_WORD(&CONF_Ind[4],w); /* PartyID */ if(plci->appl && (a->Notification_Mask[plci->appl->Id-1]&SMASK_CCBS)) { sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, CONF_Ind); } else { for(i=0; i<max_appl; i++) if(a->Notification_Mask[i]&SMASK_CCBS) sendf(&application[i],_FACILITY_I,Id&0x7,0,"ws",3, CONF_Ind); } break; } break; case CALL_HOLD_REJ: cau = parms[7]; if(cau) { i = _L3_CAUSE | cau[2]; if(cau[2]==0) i = 0x3603; } else { i = 0x3603; } PUT_WORD(&SS_Ind[1],S_HOLD); PUT_WORD(&SS_Ind[4],i); if(plci->SuppState == HOLD_REQUEST) { plci->SuppState = IDLE; sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, SS_Ind); } break; case CALL_HOLD_ACK: if(plci->SuppState == HOLD_REQUEST) { plci->SuppState = CALL_HELD; CodecIdCheck(a, plci); start_internal_command (Id, plci, hold_save_command); } break; case CALL_RETRIEVE_REJ: cau = parms[7]; if(cau) { i = _L3_CAUSE | cau[2]; if(cau[2]==0) i = 0x3603; } else { i = 0x3603; } PUT_WORD(&SS_Ind[1],S_RETRIEVE); PUT_WORD(&SS_Ind[4],i); if(plci->SuppState == RETRIEVE_REQUEST) { plci->SuppState = CALL_HELD; CodecIdCheck(a, plci); sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, SS_Ind); } break; case CALL_RETRIEVE_ACK: PUT_WORD(&SS_Ind[1],S_RETRIEVE); if(plci->SuppState == RETRIEVE_REQUEST) { plci->SuppState = IDLE; plci->call_dir |= CALL_DIR_FORCE_OUTG_NL; plci->b_channel = esc_chi[esc_chi[0]]&0x1f; if(plci->tel) { mixer_set_bchannel_id_esc (plci, plci->b_channel); dbug(1,dprintf("RetrChannel=0x%x",plci->b_channel)); SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a); if(plci->B2_prot==B2_TRANSPARENT && plci->B3_prot==B3_TRANSPARENT) { dbug(1,dprintf("Get B-ch")); start_internal_command (Id, plci, retrieve_restore_command); } else sendf(plci->appl,_FACILITY_I,Id,0,"ws",3, SS_Ind); } else start_internal_command (Id, plci, retrieve_restore_command); } break; case INDICATE_IND: if(plci->State != LISTENING) { sig_req(plci,HANGUP,0); 
send_req(plci); break; } cip = find_cip(a,parms[4],parms[6]); cip_mask = 1L<<cip; dbug(1,dprintf("cip=%d,cip_mask=%lx",cip,cip_mask)); clear_c_ind_mask (plci); if (!remove_started && !a->adapter_disabled) { set_c_ind_mask_bit (plci, MAX_APPL); group_optimization(a, plci); for(i=0; i<max_appl; i++) { if(application[i].Id && (a->CIP_Mask[i]&1 || a->CIP_Mask[i]&cip_mask) && CPN_filter_ok(parms[0],a,i) && test_group_ind_mask_bit (plci, i) ) { dbug(1,dprintf("storedcip_mask[%d]=0x%lx",i,a->CIP_Mask[i] )); set_c_ind_mask_bit (plci, i); dump_c_ind_mask (plci); plci->State = INC_CON_PENDING; plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) | CALL_DIR_IN | CALL_DIR_ANSWER; if(esc_chi[0]) { plci->b_channel = esc_chi[esc_chi[0]]&0x1f; mixer_set_bchannel_id_esc (plci, plci->b_channel); } /* if a listen on the ext controller is done, check if hook states */ /* are supported or if just a on board codec must be activated */ if(a->codec_listen[i] && !a->AdvSignalPLCI) { if(a->profile.Global_Options & HANDSET) plci->tel = ADV_VOICE; else if(a->profile.Global_Options & ON_BOARD_CODEC) plci->tel = CODEC; if(plci->tel) Id|=EXT_CONTROLLER; a->codec_listen[i] = plci; } sendf(&application[i],_CONNECT_I,Id,0, "wSSSSSSSbSSSSS", cip, /* CIP */ parms[0], /* CalledPartyNumber */ multi_CiPN_parms[0], /* CallingPartyNumber */ parms[2], /* CalledPartySubad */ parms[3], /* CallingPartySubad */ parms[4], /* BearerCapability */ parms[5], /* LowLC */ parms[6], /* HighLC */ ai_len, /* nested struct add_i */ add_i[0], /* B channel info */ add_i[1], /* keypad facility */ add_i[2], /* user user data */ add_i[3], /* nested facility */ multi_CiPN_parms[1] /* second CiPN(SCR) */ ); SendSSExtInd(&application[i], plci, Id, multi_ssext_parms); SendSetupInfo(&application[i], plci, Id, parms, SendMultiIE(plci,Id,multi_pi_parms, PI, 0x210, true)); } } clear_c_ind_mask_bit (plci, MAX_APPL); dump_c_ind_mask (plci); } if(c_ind_mask_empty (plci)) { sig_req(plci,HANGUP,0); send_req(plci); 
plci->State = IDLE; } plci->notifiedcall = 0; a->listen_active--; listen_check(a); break; case CALL_PEND_NOTIFY: plci->notifiedcall = 1; listen_check(a); break; case CALL_IND: case CALL_CON: if(plci->State==ADVANCED_VOICE_SIG || plci->State==ADVANCED_VOICE_NOSIG) { if(plci->internal_command==PERM_COD_CONN_PEND) { if(plci->State==ADVANCED_VOICE_NOSIG) { dbug(1,dprintf("***Codec OK")); if(a->AdvSignalPLCI) { tplci = a->AdvSignalPLCI; if(tplci->spoofed_msg) { dbug(1,dprintf("***Spoofed Msg(0x%x)",tplci->spoofed_msg)); tplci->command = 0; tplci->internal_command = 0; x_Id = ((word)tplci->Id<<8)|tplci->adapter->Id | 0x80; switch (tplci->spoofed_msg) { case CALL_RES: tplci->command = _CONNECT_I|RESPONSE; api_load_msg (&tplci->saved_msg, saved_parms); add_b1(tplci,&saved_parms[1],0,tplci->B1_facilities); if (tplci->adapter->Info_Mask[tplci->appl->Id-1] & 0x200) { /* early B3 connect (CIP mask bit 9) no release after a disc */ add_p(tplci,LLI,"\x01\x01"); } add_s(tplci, CONN_NR, &saved_parms[2]); add_s(tplci, LLC, &saved_parms[4]); add_ai(tplci, &saved_parms[5]); tplci->State = INC_CON_ACCEPT; sig_req(tplci, CALL_RES,0); send_req(tplci); break; case AWAITING_SELECT_B: dbug(1,dprintf("Select_B continue")); start_internal_command (x_Id, tplci, select_b_command); break; case AWAITING_MANUF_CON: /* Get_Plci per Manufacturer_Req to ext controller */ if(!tplci->Sig.Id) { dbug(1,dprintf("No SigID!")); sendf(tplci->appl, _MANUFACTURER_R|CONFIRM,x_Id,tplci->number, "dww",_DI_MANU_ID,_MANUFACTURER_R,_OUT_OF_PLCI); plci_remove(tplci); break; } tplci->command = _MANUFACTURER_R; api_load_msg (&tplci->saved_msg, saved_parms); dir = saved_parms[2].info[0]; if(dir==1) { sig_req(tplci,CALL_REQ,0); } else if(!dir){ sig_req(tplci,LISTEN_REQ,0); } send_req(tplci); sendf(tplci->appl, _MANUFACTURER_R|CONFIRM,x_Id,tplci->number, "dww",_DI_MANU_ID,_MANUFACTURER_R,0); break; case (CALL_REQ|AWAITING_MANUF_CON): sig_req(tplci,CALL_REQ,0); send_req(tplci); break; case CALL_REQ: if(!tplci->Sig.Id) { 
dbug(1,dprintf("No SigID!")); sendf(tplci->appl,_CONNECT_R|CONFIRM,tplci->adapter->Id,0,"w",_OUT_OF_PLCI); plci_remove(tplci); break; } tplci->command = _CONNECT_R; api_load_msg (&tplci->saved_msg, saved_parms); add_s(tplci,CPN,&saved_parms[1]); add_s(tplci,DSA,&saved_parms[3]); add_ai(tplci,&saved_parms[9]); sig_req(tplci,CALL_REQ,0); send_req(tplci); break; case CALL_RETRIEVE: tplci->command = C_RETRIEVE_REQ; sig_req(tplci,CALL_RETRIEVE,0); send_req(tplci); break; } tplci->spoofed_msg = 0; if (tplci->internal_command == 0) next_internal_command (x_Id, tplci); } } next_internal_command (Id, plci); break; } dbug(1,dprintf("***Codec Hook Init Req")); plci->internal_command = PERM_COD_HOOK; add_p(plci,FTY,"\x01\x09"); /* Get Hook State*/ sig_req(plci,TEL_CTRL,0); send_req(plci); } } else if(plci->command != _MANUFACTURER_R /* old style permanent connect */ && plci->State!=INC_ACT_PENDING) { mixer_set_bchannel_id_esc (plci, plci->b_channel); if(plci->tel == ADV_VOICE && plci->SuppState == IDLE) /* with permanent codec switch on immediately */ { chi[2] = plci->b_channel; SetVoiceChannel(a->AdvCodecPLCI, chi, a); } sendf(plci->appl,_CONNECT_ACTIVE_I,Id,0,"Sss",parms[21],"",""); plci->State = INC_ACT_PENDING; } break; case TEL_CTRL: Number = 0; ie = multi_fac_parms[0]; /* inspect the facility hook indications */ if(plci->State==ADVANCED_VOICE_SIG && ie[0]){ switch (ie[1]&0x91) { case 0x80: /* hook off */ case 0x81: if(plci->internal_command==PERM_COD_HOOK) { dbug(1,dprintf("init:hook_off")); plci->hook_state = ie[1]; next_internal_command (Id, plci); break; } else /* ignore doubled hook indications */ { if( ((plci->hook_state)&0xf0)==0x80) { dbug(1,dprintf("ignore hook")); break; } plci->hook_state = ie[1]&0x91; } /* check for incoming call pending */ /* and signal '+'.Appl must decide */ /* with connect_res if call must */ /* accepted or not */ for(i=0, tplci=NULL;i<max_appl;i++){ if(a->codec_listen[i] && (a->codec_listen[i]->State==INC_CON_PENDING 
||a->codec_listen[i]->State==INC_CON_ALERT) ){ tplci = a->codec_listen[i]; tplci->appl = &application[i]; } } /* no incoming call, do outgoing call */ /* and signal '+' if outg. setup */ if(!a->AdvSignalPLCI && !tplci){ if((i=get_plci(a))) { a->AdvSignalPLCI = &a->plci[i-1]; tplci = a->AdvSignalPLCI; tplci->tel = ADV_VOICE; PUT_WORD(&voice_cai[5],a->AdvSignalAppl->MaxDataLength); if (a->Info_Mask[a->AdvSignalAppl->Id-1] & 0x200){ /* early B3 connect (CIP mask bit 9) no release after a disc */ add_p(tplci,LLI,"\x01\x01"); } add_p(tplci, CAI, voice_cai); add_p(tplci, OAD, a->TelOAD); add_p(tplci, OSA, a->TelOSA); add_p(tplci,SHIFT|6,NULL); add_p(tplci,SIN,"\x02\x01\x00"); add_p(tplci,UID,"\x06\x43\x61\x70\x69\x32\x30"); sig_req(tplci,ASSIGN,DSIG_ID); a->AdvSignalPLCI->internal_command = HOOK_OFF_REQ; a->AdvSignalPLCI->command = 0; tplci->appl = a->AdvSignalAppl; tplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE; send_req(tplci); } } if(!tplci) break; Id = ((word)tplci->Id<<8)|a->Id; Id|=EXT_CONTROLLER; sendf(tplci->appl, _FACILITY_I, Id, 0, "ws", (word)0, "\x01+"); break; case 0x90: /* hook on */ case 0x91: if(plci->internal_command==PERM_COD_HOOK) { dbug(1,dprintf("init:hook_on")); plci->hook_state = ie[1]&0x91; next_internal_command (Id, plci); break; } else /* ignore doubled hook indications */ { if( ((plci->hook_state)&0xf0)==0x90) break; plci->hook_state = ie[1]&0x91; } /* hangup the adv. 
voice call and signal '-' to the appl */ if(a->AdvSignalPLCI) { Id = ((word)a->AdvSignalPLCI->Id<<8)|a->Id; if(plci->tel) Id|=EXT_CONTROLLER; sendf(a->AdvSignalAppl, _FACILITY_I, Id, 0, "ws", (word)0, "\x01-"); a->AdvSignalPLCI->internal_command = HOOK_ON_REQ; a->AdvSignalPLCI->command = 0; sig_req(a->AdvSignalPLCI,HANGUP,0); send_req(a->AdvSignalPLCI); } break; } } break; case RESUME: clear_c_ind_mask_bit (plci, (word)(plci->appl->Id-1)); PUT_WORD(&resume_cau[4],GOOD); sendf(plci->appl,_FACILITY_I,Id,0,"ws", (word)3, resume_cau); break; case SUSPEND: clear_c_ind_mask (plci); if (plci->NL.Id && !plci->nl_remove_id) { mixer_remove (plci); nl_req_ncci(plci,REMOVE,0); } if (!plci->sig_remove_id) { plci->internal_command = 0; sig_req(plci,REMOVE,0); } send_req(plci); if(!plci->channels) { sendf(plci->appl,_FACILITY_I,Id,0,"ws", (word)3, "\x05\x04\x00\x02\x00\x00"); sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0); } break; case SUSPEND_REJ: break; case HANGUP: plci->hangup_flow_ctrl_timer=0; if(plci->manufacturer && plci->State==LOCAL_CONNECT) break; cau = parms[7]; if(cau) { i = _L3_CAUSE | cau[2]; if(cau[2]==0) i = 0; else if(cau[2]==8) i = _L1_ERROR; else if(cau[2]==9 || cau[2]==10) i = _L2_ERROR; else if(cau[2]==5) i = _CAPI_GUARD_ERROR; } else { i = _L3_ERROR; } if(plci->State==INC_CON_PENDING || plci->State==INC_CON_ALERT) { for(i=0; i<max_appl; i++) { if(test_c_ind_mask_bit (plci, i)) sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); } } else { clear_c_ind_mask (plci); } if(!plci->appl) { if (plci->State == LISTENING) { plci->notifiedcall=0; a->listen_active--; } plci->State = INC_DIS_PENDING; if(c_ind_mask_empty (plci)) { plci->State = IDLE; if (plci->NL.Id && !plci->nl_remove_id) { mixer_remove (plci); nl_req_ncci(plci,REMOVE,0); } if (!plci->sig_remove_id) { plci->internal_command = 0; sig_req(plci,REMOVE,0); } send_req(plci); } } else { /* collision of DISCONNECT or CONNECT_RES with HANGUP can */ /* result in a second HANGUP! 
Don't generate another */ /* DISCONNECT */ if(plci->State!=IDLE && plci->State!=INC_DIS_PENDING) { if(plci->State==RESUMING) { PUT_WORD(&resume_cau[4],i); sendf(plci->appl,_FACILITY_I,Id,0,"ws", (word)3, resume_cau); } plci->State = INC_DIS_PENDING; sendf(plci->appl,_DISCONNECT_I,Id,0,"w",i); } } break; case SSEXT_IND: SendSSExtInd(NULL,plci,Id,multi_ssext_parms); break; case VSWITCH_REQ: VSwitchReqInd(plci,Id,multi_vswitch_parms); break; case VSWITCH_IND: if(plci->relatedPTYPLCI && plci->vswitchstate==3 && plci->relatedPTYPLCI->vswitchstate==3 && parms[MAXPARMSIDS-1][0]) { add_p(plci->relatedPTYPLCI,SMSG,parms[MAXPARMSIDS-1]); sig_req(plci->relatedPTYPLCI,VSWITCH_REQ,0); send_req(plci->relatedPTYPLCI); } else VSwitchReqInd(plci,Id,multi_vswitch_parms); break; } } static void SendSetupInfo(APPL * appl, PLCI * plci, dword Id, byte * * parms, byte Info_Sent_Flag) { word i; byte * ie; word Info_Number; byte * Info_Element; word Info_Mask = 0; dbug(1,dprintf("SetupInfo")); for(i=0; i<MAXPARMSIDS; i++) { ie = parms[i]; Info_Number = 0; Info_Element = ie; if(ie[0]) { switch(i) { case 0: dbug(1,dprintf("CPN ")); Info_Number = 0x0070; Info_Mask = 0x80; Info_Sent_Flag = true; break; case 8: /* display */ dbug(1,dprintf("display(%d)",i)); Info_Number = 0x0028; Info_Mask = 0x04; Info_Sent_Flag = true; break; case 16: /* Channel Id */ dbug(1,dprintf("CHI")); Info_Number = 0x0018; Info_Mask = 0x100; Info_Sent_Flag = true; mixer_set_bchannel_id (plci, Info_Element); break; case 19: /* Redirected Number */ dbug(1,dprintf("RDN")); Info_Number = 0x0074; Info_Mask = 0x400; Info_Sent_Flag = true; break; case 20: /* Redirected Number extended */ dbug(1,dprintf("RDX")); Info_Number = 0x0073; Info_Mask = 0x400; Info_Sent_Flag = true; break; case 22: /* Redirecing Number */ dbug(1,dprintf("RIN")); Info_Number = 0x0076; Info_Mask = 0x400; Info_Sent_Flag = true; break; default: Info_Number = 0; break; } } if(i==MAXPARMSIDS-2){ /* to indicate the message type "Setup" */ Info_Number = 0x8000 
|5;
      Info_Mask = 0x10;
      Info_Element = "";
    }

    /* deliver only when at least one relevant IE was seen and the
       application subscribed to this info class via its Info_Mask */
    if(Info_Sent_Flag && Info_Number){
      if(plci->adapter->Info_Mask[appl->Id-1] & Info_Mask)
      {
        sendf(appl,_INFO_I,Id,0,"wS",Info_Number,Info_Element);
      }
    }
  }
}

/*------------------------------------------------------------------*/
/* SendInfo                                                         */
/*                                                                  */
/* Translate the parsed signalling information elements in parms[]  */
/* into CAPI _INFO_I indications.  Routing per element:             */
/*  - NCR_FACILITY indications are broadcast to every application   */
/*    whose Info_Mask matches (controller broadcast);               */
/*  - if the PLCI has no owning appl yet (overlap receiving), a     */
/*    subset of elements is broadcast to all call candidates in the */
/*    c_ind_mask;                                                   */
/*  - otherwise the owning application receives the indication.     */
/* iesent is passed by value: setting it inside the loop only       */
/* influences the Info_Mask chosen for the final ESC/MT element     */
/* (MAXPARMSIDS-2), which is always parsed last.                    */
/*------------------------------------------------------------------*/
static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
{
  word i;
  word j;
  word k;
  byte * ie;
  word Info_Number;
  byte * Info_Element;
  word Info_Mask = 0;
  static byte charges[5] = {4,0,0,0,0};
  static byte cause[] = {0x02,0x80,0x00};
  APPL *appl;

  dbug(1,dprintf("InfoParse "));

  /* nothing to report: no owner, no call state, not a controller bcast */
  if( !plci->appl
   && !plci->State
   && plci->Sig.Ind!=NCR_FACILITY )
  {
    dbug(1,dprintf("NoParse "));
    return;
  }
  cause[2] = 0;
  for(i=0; i<MAXPARMSIDS; i++)
  {
    ie = parms[i];
    Info_Number = 0;
    Info_Element = ie;
    if(ie[0])
    {
      switch(i)
      {
      case 0: /* Called Party Number */
        dbug(1,dprintf("CPN "));
        Info_Number = 0x0070;
        Info_Mask = 0x80;
        break;
      case 7: /* ESC_CAU */
        /* not sent itself (Info_Mask 0x00, Info_Element NULL) — only
           remembers the escape cause for case 17 below */
        dbug(1,dprintf("cau(0x%x)",ie[2]));
        Info_Number = 0x0008;
        Info_Mask = 0x00;
        cause[2] = ie[2];
        Info_Element = NULL;
        break;
      case 8: /* display */
        dbug(1,dprintf("display(%d)",i));
        Info_Number = 0x0028;
        Info_Mask = 0x04;
        break;
      case 9: /* Date display */
        dbug(1,dprintf("date(%d)",i));
        Info_Number = 0x0029;
        Info_Mask = 0x02;
        break;
      case 10: /* charges */
        /* skip IE bytes until the first with the high bit set, then
           copy up to 4 following bytes into the charges buffer */
        for(j=0;j<4;j++) charges[1+j] = 0;
        for(j=0; j<ie[0] && !(ie[1+j]&0x80); j++);
        for(k=1,j++; j<ie[0] && k<=4; j++,k++) charges[k] = ie[1+j];
        Info_Number = 0x4000;
        Info_Mask = 0x40;
        Info_Element = charges;
        break;
      case 11: /* user user info */
        dbug(1,dprintf("uui"));
        Info_Number = 0x007E;
        Info_Mask = 0x08;
        break;
      case 12: /* congestion receiver ready */
        dbug(1,dprintf("clRDY"));
        Info_Number = 0x00B0;
        Info_Mask = 0x08;
        Info_Element = "";
        break;
      case 13: /* congestion receiver not ready */
        dbug(1,dprintf("clNRDY"));
        Info_Number = 0x00BF;
        Info_Mask = 0x08;
        Info_Element = "";
        break;
      case 15: /* Keypad Facility */
        dbug(1,dprintf("KEY"));
        Info_Number = 0x002C;
        Info_Mask = 0x20;
        break;
      case 16: /* Channel Id */
        dbug(1,dprintf("CHI"));
        Info_Number = 0x0018;
        Info_Mask = 0x100;
        /* side effect: latch the B-channel id from the CHI element */
        mixer_set_bchannel_id (plci, Info_Element);
        break;
      case 17: /* if no 1tr6 cause, send full cause, else esc_cause */
        dbug(1,dprintf("q9cau(0x%x)",ie[2]));
        if(!cause[2] || cause[2]<0x80) break; /* eg. layer 1 error */
        Info_Number = 0x0008;
        Info_Mask = 0x01;
        /* prefer the escape cause saved in case 7 when it differs */
        if(cause[2] != ie[2]) Info_Element = cause;
        break;
      case 19: /* Redirected Number */
        dbug(1,dprintf("RDN"));
        Info_Number = 0x0074;
        Info_Mask = 0x400;
        break;
      case 22: /* Redirecting Number */
        dbug(1,dprintf("RIN"));
        Info_Number = 0x0076;
        Info_Mask = 0x400;
        break;
      case 23: /* Notification Indicator */
        dbug(1,dprintf("NI"));
        Info_Number = (word)NI;
        Info_Mask = 0x210;
        break;
      case 26: /* Call State */
        dbug(1,dprintf("CST"));
        Info_Number = (word)CST;
        Info_Mask = 0x01; /* do with cause i.e. for now */
        break;
      case MAXPARMSIDS-2: /* Escape Message Type, must be the last indication */
        dbug(1,dprintf("ESC/MT[0x%x]",ie[3]));
        Info_Number = 0x8000 |ie[3];
        /* if any IE was already delivered, always report the msg type */
        if(iesent) Info_Mask = 0xffff;
        else Info_Mask = 0x10;
        Info_Element = "";
        break;
      default:
        Info_Number = 0;
        Info_Mask = 0;
        Info_Element = "";
        break;
      }
    }

    if(plci->Sig.Ind==NCR_FACILITY)           /* check controller broadcast */
    {
      for(j=0; j<max_appl; j++)
      {
        appl = &application[j];
        if(Info_Number
        && appl->Id
        && plci->adapter->Info_Mask[appl->Id-1] &Info_Mask)
        {
          dbug(1,dprintf("NCR_Ind"));
          iesent=true;
          sendf(&application[j],_INFO_I,Id&0x0f,0,"wS",Info_Number,Info_Element);
        }
      }
    }
    else if(!plci->appl)
    { /* overlap receiving broadcast */
      if(Info_Number==CPN
      || Info_Number==KEY
      || Info_Number==NI
      || Info_Number==DSP
      || Info_Number==UUI )
      {
        for(j=0; j<max_appl; j++)
        {
          if(test_c_ind_mask_bit (plci, j))
          {
            dbug(1,dprintf("Ovl_Ind"));
            iesent=true;
            sendf(&application[j],_INFO_I,Id,0,"wS",Info_Number,Info_Element);
          }
        }
      }
    }                                         /* all other signalling states */
    else if(Info_Number
    && plci->adapter->Info_Mask[plci->appl->Id-1] &Info_Mask)
    {
      dbug(1,dprintf("Std_Ind"));
      iesent=true;
      sendf(plci->appl,_INFO_I,Id,0,"wS",Info_Number,Info_Element);
    }
  }
}

/*------------------------------------------------------------------*/
/* SendMultiIE                                                      */
/*                                                                  */
/* Deliver every occurrence of a repeatable information element     */
/* (parms[0..MAX_MULTI_IE-1]) as an _INFO_I with the given ie_type  */
/* and info_mask.  Routing mirrors SendInfo (controller broadcast / */
/* overlap receiving / owning application).  setupParse forces the  */
/* parse even without an owner, for use during SETUP processing.    */
/* Returns nonzero if at least one indication was sent.             */
/*------------------------------------------------------------------*/
static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, dword info_mask, byte setupParse)
{
  word i;
  word j;
  byte * ie;
  word Info_Number;
  byte * Info_Element;
  APPL *appl;
  word Info_Mask = 0;
  byte iesent=0;

  if( !plci->appl
   && !plci->State
   && plci->Sig.Ind!=NCR_FACILITY
   && !setupParse )
  {
    dbug(1,dprintf("NoM-IEParse "));
    return 0;
  }
  dbug(1,dprintf("M-IEParse "));

  for(i=0; i<MAX_MULTI_IE; i++)
  {
    ie = parms[i];
    Info_Number = 0;
    Info_Element = ie;
    if(ie[0])
    {
      dbug(1,dprintf("[Ind0x%x]:IE=0x%x",plci->Sig.Ind,ie_type));
      Info_Number = (word)ie_type;
      Info_Mask = (word)info_mask;
    }

    if(plci->Sig.Ind==NCR_FACILITY)           /* check controller broadcast */
    {
      for(j=0; j<max_appl; j++)
      {
        appl = &application[j];
        if(Info_Number
        && appl->Id
        && plci->adapter->Info_Mask[appl->Id-1] &Info_Mask)
        {
          iesent = true;
          dbug(1,dprintf("Mlt_NCR_Ind"));
          sendf(&application[j],_INFO_I,Id&0x0f,0,"wS",Info_Number,Info_Element);
        }
      }
    }
    else if(!plci->appl && Info_Number)
    { /* overlap receiving broadcast */
      for(j=0; j<max_appl; j++)
      {
        if(test_c_ind_mask_bit (plci, j))
        {
          iesent = true;
          dbug(1,dprintf("Mlt_Ovl_Ind"));
          sendf(&application[j],_INFO_I,Id,0,"wS",Info_Number,Info_Element);
        }
      }
    }                                         /* all other signalling states */
    else if(Info_Number
    && plci->adapter->Info_Mask[plci->appl->Id-1] &Info_Mask)
    {
      iesent = true;
      dbug(1,dprintf("Mlt_Std_Ind"));
      sendf(plci->appl,_INFO_I,Id,0,"wS",Info_Number,Info_Element);
    }
  }
  return iesent;
}

static void SendSSExtInd(APPL * appl, PLCI * plci, dword Id, byte * * parms)
{
  word i;
  /* Format of multi_ssext_parms[i][]:
     0 byte length
     1 byte SSEXTIE
     2 byte SSEXT_REQ/SSEXT_IND
     3 byte length
     4 word SSExtCommand
     6...
Params */ if( plci && plci->State && plci->Sig.Ind!=NCR_FACILITY ) for(i=0;i<MAX_MULTI_IE;i++) { if(parms[i][0]<6) continue; if(parms[i][2]==SSEXT_REQ) continue; if(appl) { parms[i][0]=0; /* kill it */ sendf(appl,_MANUFACTURER_I, Id, 0, "dwS", _DI_MANU_ID, _DI_SSEXT_CTRL, &parms[i][3]); } else if(plci->appl) { parms[i][0]=0; /* kill it */ sendf(plci->appl,_MANUFACTURER_I, Id, 0, "dwS", _DI_MANU_ID, _DI_SSEXT_CTRL, &parms[i][3]); } } }; static void nl_ind(PLCI *plci) { byte ch; word ncci; dword Id; DIVA_CAPI_ADAPTER * a; word NCCIcode; APPL * APPLptr; word count; word Num; word i, ncpi_state; byte len, ncci_state; word msg; word info = 0; word fax_feature_bits; byte fax_send_edata_ack; static byte v120_header_buffer[2 + 3]; static word fax_info[] = { 0, /* T30_SUCCESS */ _FAX_NO_CONNECTION, /* T30_ERR_NO_DIS_RECEIVED */ _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_RESPONSE */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_RESPONSE */ _FAX_PROTOCOL_ERROR, /* T30_ERR_TOO_MANY_REPEATS */ _FAX_PROTOCOL_ERROR, /* T30_ERR_UNEXPECTED_MESSAGE */ _FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DCN */ _FAX_LOCAL_ABORT, /* T30_ERR_DTC_UNSUPPORTED */ _FAX_TRAINING_ERROR, /* T30_ERR_ALL_RATES_FAILED */ _FAX_TRAINING_ERROR, /* T30_ERR_TOO_MANY_TRAINS */ _FAX_PARAMETER_ERROR, /* T30_ERR_RECEIVE_CORRUPTED */ _FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DISC */ _FAX_LOCAL_ABORT, /* T30_ERR_APPLICATION_DISC */ _FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_DIS */ _FAX_LOCAL_ABORT, /* T30_ERR_INCOMPATIBLE_DCS */ _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_COMMAND */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_COMMAND */ _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_COMMAND_TOO_LONG */ _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_RESPONSE_TOO_LONG */ _FAX_NO_CONNECTION, /* T30_ERR_NOT_IDENTIFIED */ _FAX_PROTOCOL_ERROR, /* T30_ERR_SUPERVISORY_TIMEOUT */ _FAX_PARAMETER_ERROR, /* T30_ERR_TOO_LONG_SCAN_LINE */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_MPS */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_CFR */ 
_FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_FTT */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_EOM */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_MPS */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_MCF */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_RTN */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_CFR */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOP */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOM */ _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_MPS */ 0x331d, /* T30_ERR_SUB_SEP_UNSUPPORTED */ 0x331e, /* T30_ERR_PWD_UNSUPPORTED */ 0x331f, /* T30_ERR_SUB_SEP_PWD_UNSUPPORTED */ _FAX_PROTOCOL_ERROR, /* T30_ERR_INVALID_COMMAND_FRAME */ _FAX_PARAMETER_ERROR, /* T30_ERR_UNSUPPORTED_PAGE_CODING */ _FAX_PARAMETER_ERROR, /* T30_ERR_INVALID_PAGE_CODING */ _FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_PAGE_CONFIG */ _FAX_LOCAL_ABORT, /* T30_ERR_TIMEOUT_FROM_APPLICATION */ _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_NO_REACTION_ON_MARK */ _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_TRAINING_TIMEOUT */ _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_UNEXPECTED_V21 */ _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_PRIMARY_CTS_ON */ _FAX_LOCAL_ABORT, /* T30_ERR_V34FAX_TURNAROUND_POLLING */ _FAX_LOCAL_ABORT /* T30_ERR_V34FAX_V8_INCOMPATIBILITY */ }; byte dtmf_code_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE + 1]; static word rtp_info[] = { GOOD, /* RTP_SUCCESS */ 0x3600 /* RTP_ERR_SSRC_OR_PAYLOAD_CHANGE */ }; static dword udata_forwarding_table[0x100 / sizeof(dword)] = { 0x0020301e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; ch = plci->NL.IndCh; a = plci->adapter; ncci = a->ch_ncci[ch]; Id = (((dword)(ncci ? 
ncci : ch)) << 16) | (((word) plci->Id) << 8) | a->Id; if(plci->tel) Id|=EXT_CONTROLLER; APPLptr = plci->appl; dbug(1,dprintf("NL_IND-Id(NL:0x%x)=0x%08lx,plci=%x,tel=%x,state=0x%x,ch=0x%x,chs=%d,Ind=%x", plci->NL.Id,Id,plci->Id,plci->tel,plci->State,ch,plci->channels,plci->NL.Ind &0x0f)); /* in the case if no connect_active_Ind was sent to the appl we wait for */ if (plci->nl_remove_id) { plci->NL.RNR = 2; /* discard */ dbug(1,dprintf("NL discard while remove pending")); return; } if((plci->NL.Ind &0x0f)==N_CONNECT) { if(plci->State==INC_DIS_PENDING || plci->State==OUTG_DIS_PENDING || plci->State==IDLE) { plci->NL.RNR = 2; /* discard */ dbug(1,dprintf("discard n_connect")); return; } if(plci->State < INC_ACT_PENDING) { plci->NL.RNR = 1; /* flow control */ channel_x_off (plci, ch, N_XON_CONNECT_IND); return; } } if(!APPLptr) /* no application or invalid data */ { /* while reloading the DSP */ dbug(1,dprintf("discard1")); plci->NL.RNR = 2; return; } if (((plci->NL.Ind &0x0f) == N_UDATA) && (((plci->B2_prot != B2_SDLC) && ((plci->B1_resource == 17) || (plci->B1_resource == 18))) || (plci->B2_prot == 7) || (plci->B3_prot == 7)) ) { plci->ncpi_buffer[0] = 0; ncpi_state = plci->ncpi_state; if (plci->NL.complete == 1) { byte * data = &plci->NL.RBuffer->P[0]; if ((plci->NL.RBuffer->length >= 12) &&( (*data == DSP_UDATA_INDICATION_DCD_ON) ||(*data == DSP_UDATA_INDICATION_CTS_ON)) ) { word conn_opt, ncpi_opt = 0x00; /* HexDump ("MDM N_UDATA:", plci->NL.RBuffer->length, data); */ if (*data == DSP_UDATA_INDICATION_DCD_ON) plci->ncpi_state |= NCPI_MDM_DCD_ON_RECEIVED; if (*data == DSP_UDATA_INDICATION_CTS_ON) plci->ncpi_state |= NCPI_MDM_CTS_ON_RECEIVED; data++; /* indication code */ data += 2; /* timestamp */ if ((*data == DSP_CONNECTED_NORM_V18) || (*data == DSP_CONNECTED_NORM_VOWN)) ncpi_state &= ~(NCPI_MDM_DCD_ON_RECEIVED | NCPI_MDM_CTS_ON_RECEIVED); data++; /* connected norm */ conn_opt = GET_WORD(data); data += 2; /* connected options */ PUT_WORD (&(plci->ncpi_buffer[1]), 
(word)(GET_DWORD(data) & 0x0000FFFF)); if (conn_opt & DSP_CONNECTED_OPTION_MASK_V42) { ncpi_opt |= MDM_NCPI_ECM_V42; } else if (conn_opt & DSP_CONNECTED_OPTION_MASK_MNP) { ncpi_opt |= MDM_NCPI_ECM_MNP; } else { ncpi_opt |= MDM_NCPI_TRANSPARENT; } if (conn_opt & DSP_CONNECTED_OPTION_MASK_COMPRESSION) { ncpi_opt |= MDM_NCPI_COMPRESSED; } PUT_WORD (&(plci->ncpi_buffer[3]), ncpi_opt); plci->ncpi_buffer[0] = 4; plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND | NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND; } } if (plci->B3_prot == 7) { if (((a->ncci_state[ncci] == INC_ACT_PENDING) || (a->ncci_state[ncci] == OUTG_CON_PENDING)) && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { a->ncci_state[ncci] = INC_ACT_PENDING; sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } } if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id-1]) & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN))) || !(ncpi_state & NCPI_MDM_DCD_ON_RECEIVED) || !(ncpi_state & NCPI_MDM_CTS_ON_RECEIVED)) { plci->NL.RNR = 2; return; } } if(plci->NL.complete == 2) { if (((plci->NL.Ind &0x0f) == N_UDATA) && !(udata_forwarding_table[plci->RData[0].P[0] >> 5] & (1L << (plci->RData[0].P[0] & 0x1f)))) { switch(plci->RData[0].P[0]) { case DTMF_UDATA_INDICATION_FAX_CALLING_TONE: if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG) sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,"ws", SELECTOR_DTMF, "\x01X"); break; case DTMF_UDATA_INDICATION_ANSWER_TONE: if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG) sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,"ws", SELECTOR_DTMF, "\x01Y"); break; case DTMF_UDATA_INDICATION_DIGITS_RECEIVED: dtmf_indication (Id, plci, plci->RData[0].P, plci->RData[0].PLength); break; case DTMF_UDATA_INDICATION_DIGITS_SENT: dtmf_confirmation (Id, plci); break; case UDATA_INDICATION_MIXER_TAP_DATA: 
capidtmf_recv_process_block (&(plci->capidtmf_state), plci->RData[0].P + 1, (word)(plci->RData[0].PLength - 1)); i = capidtmf_indication (&(plci->capidtmf_state), dtmf_code_buffer + 1); if (i != 0) { dtmf_code_buffer[0] = DTMF_UDATA_INDICATION_DIGITS_RECEIVED; dtmf_indication (Id, plci, dtmf_code_buffer, (word)(i + 1)); } break; case UDATA_INDICATION_MIXER_COEFS_SET: mixer_indication_coefs_set (Id, plci); break; case UDATA_INDICATION_XCONNECT_FROM: mixer_indication_xconnect_from (Id, plci, plci->RData[0].P, plci->RData[0].PLength); break; case UDATA_INDICATION_XCONNECT_TO: mixer_indication_xconnect_to (Id, plci, plci->RData[0].P, plci->RData[0].PLength); break; case LEC_UDATA_INDICATION_DISABLE_DETECT: ec_indication (Id, plci, plci->RData[0].P, plci->RData[0].PLength); break; default: break; } } else { if ((plci->RData[0].PLength != 0) && ((plci->B2_prot == B2_V120_ASYNC) || (plci->B2_prot == B2_V120_ASYNC_V42BIS) || (plci->B2_prot == B2_V120_BIT_TRANSPARENT))) { sendf(plci->appl,_DATA_B3_I,Id,0, "dwww", plci->RData[1].P, (plci->NL.RNum < 2) ? 0 : plci->RData[1].PLength, plci->RNum, plci->RFlags); } else { sendf(plci->appl,_DATA_B3_I,Id,0, "dwww", plci->RData[0].P, plci->RData[0].PLength, plci->RNum, plci->RFlags); } } return; } fax_feature_bits = 0; if((plci->NL.Ind &0x0f)==N_CONNECT || (plci->NL.Ind &0x0f)==N_CONNECT_ACK || (plci->NL.Ind &0x0f)==N_DISC || (plci->NL.Ind &0x0f)==N_EDATA || (plci->NL.Ind &0x0f)==N_DISC_ACK) { info = 0; plci->ncpi_buffer[0] = 0; switch (plci->B3_prot) { case 0: /*XPARENT*/ case 1: /*T.90 NL*/ break; /* no network control protocol info - jfr */ case 2: /*ISO8202*/ case 3: /*X25 DCE*/ for(i=0; i<plci->NL.RLength; i++) plci->ncpi_buffer[4+i] = plci->NL.RBuffer->P[i]; plci->ncpi_buffer[0] = (byte)(i+3); plci->ncpi_buffer[1] = (byte)(plci->NL.Ind &N_D_BIT? 
1:0); plci->ncpi_buffer[2] = 0; plci->ncpi_buffer[3] = 0; break; case 4: /*T.30 - FAX*/ case 5: /*T.30 - FAX*/ if(plci->NL.RLength>=sizeof(T30_INFO)) { dbug(1,dprintf("FaxStatus %04x", ((T30_INFO *)plci->NL.RBuffer->P)->code)); len = 9; PUT_WORD(&(plci->ncpi_buffer[1]),((T30_INFO *)plci->NL.RBuffer->P)->rate_div_2400 * 2400); fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low); i = (((T30_INFO *)plci->NL.RBuffer->P)->resolution & T30_RESOLUTION_R8_0770_OR_200) ? 0x0001 : 0x0000; if (plci->B3_prot == 5) { if (!(fax_feature_bits & T30_FEATURE_BIT_ECM)) i |= 0x8000; /* This is not an ECM connection */ if (fax_feature_bits & T30_FEATURE_BIT_T6_CODING) i |= 0x4000; /* This is a connection with MMR compression */ if (fax_feature_bits & T30_FEATURE_BIT_2D_CODING) i |= 0x2000; /* This is a connection with MR compression */ if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS) i |= 0x0004; /* More documents */ if (fax_feature_bits & T30_FEATURE_BIT_POLLING) i |= 0x0002; /* Fax-polling indication */ } dbug(1,dprintf("FAX Options %04x %04x",fax_feature_bits,i)); PUT_WORD(&(plci->ncpi_buffer[3]),i); PUT_WORD(&(plci->ncpi_buffer[5]),((T30_INFO *)plci->NL.RBuffer->P)->data_format); plci->ncpi_buffer[7] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_low; plci->ncpi_buffer[8] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_high; plci->ncpi_buffer[len] = 0; if(((T30_INFO *)plci->NL.RBuffer->P)->station_id_len) { plci->ncpi_buffer[len] = 20; for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++) plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i]; } if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK)) { if (((T30_INFO *)plci->NL.RBuffer->P)->code < ARRAY_SIZE(fax_info)) info = fax_info[((T30_INFO *)plci->NL.RBuffer->P)->code]; else info = _FAX_PROTOCOL_ERROR; } if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1]) & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << 
PRIVATE_FAX_NONSTANDARD))) { i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len; while (i < plci->NL.RBuffer->length) plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++]; } plci->ncpi_buffer[0] = len; fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low); PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low, fax_feature_bits); plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND; if (((plci->NL.Ind &0x0f) == N_CONNECT_ACK) || (((plci->NL.Ind &0x0f) == N_CONNECT) && (fax_feature_bits & T30_FEATURE_BIT_POLLING)) || (((plci->NL.Ind &0x0f) == N_EDATA) && ((((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_TRAIN_OK) || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS) || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DTC)))) { plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT; } if (((plci->NL.Ind &0x0f) == N_DISC) || ((plci->NL.Ind &0x0f) == N_DISC_ACK) || (((plci->NL.Ind &0x0f) == N_EDATA) && (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_EOP_CAPI))) { plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND; } } break; case B3_RTP: if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK)) { if (plci->NL.RLength != 0) { info = rtp_info[plci->NL.RBuffer->P[0]]; plci->ncpi_buffer[0] = plci->NL.RLength - 1; for (i = 1; i < plci->NL.RLength; i++) plci->ncpi_buffer[i] = plci->NL.RBuffer->P[i]; } } break; } plci->NL.RNR = 2; } switch(plci->NL.Ind &0x0f) { case N_EDATA: if ((plci->B3_prot == 4) || (plci->B3_prot == 5)) { dbug(1,dprintf("EDATA ncci=0x%x state=%d code=%02x", ncci, a->ncci_state[ncci], ((T30_INFO *)plci->NL.RBuffer->P)->code)); fax_send_edata_ack = (((T30_INFO *)(plci->fax_connect_info_buffer))->operating_mode == T30_OPERATING_MODE_CAPI_NEG); if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF) && (plci->nsf_control_bits & (T30_NSF_CONTROL_BIT_NEGOTIATE_IND | 
T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)) && (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS) && (a->ncci_state[ncci] == OUTG_CON_PENDING) && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT)) { ((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code; sendf(plci->appl,_MANUFACTURER_I,Id,0,"dwbS",_DI_MANU_ID,_DI_NEGOTIATE_B3, (byte)(plci->ncpi_buffer[0] + 1), plci->ncpi_buffer); plci->ncpi_state |= NCPI_NEGOTIATE_B3_SENT; if (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP) fax_send_edata_ack = false; } if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS) { switch (((T30_INFO *)plci->NL.RBuffer->P)->code) { case EDATA_T30_DIS: if ((a->ncci_state[ncci] == OUTG_CON_PENDING) && !(GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & T30_CONTROL_BIT_REQUEST_POLLING) && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { a->ncci_state[ncci] = INC_ACT_PENDING; if (plci->B3_prot == 4) sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"s",""); else sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } break; case EDATA_T30_TRAIN_OK: if ((a->ncci_state[ncci] == INC_ACT_PENDING) && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { if (plci->B3_prot == 4) sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"s",""); else sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } break; case EDATA_T30_EOP_CAPI: if (a->ncci_state[ncci] == CONNECTED) { sendf(plci->appl,_DISCONNECT_B3_I,Id,0,"wS",GOOD,plci->ncpi_buffer); a->ncci_state[ncci] = INC_DIS_PENDING; plci->ncpi_state = 0; fax_send_edata_ack = false; } break; } } else { switch (((T30_INFO *)plci->NL.RBuffer->P)->code) { case EDATA_T30_TRAIN_OK: if ((a->ncci_state[ncci] == INC_ACT_PENDING) && 
(plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { if (plci->B3_prot == 4) sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"s",""); else sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } break; } } if (fax_send_edata_ack) { ((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code; plci->fax_edata_ack_length = 1; start_internal_command (Id, plci, fax_edata_ack_command); } } else { dbug(1,dprintf("EDATA ncci=0x%x state=%d", ncci, a->ncci_state[ncci])); } break; case N_CONNECT: if (!a->ch_ncci[ch]) { ncci = get_ncci (plci, ch, 0); Id = (Id & 0xffff) | (((dword) ncci) << 16); } dbug(1,dprintf("N_CONNECT: ch=%d state=%d plci=%lx plci_Id=%lx plci_State=%d", ch, a->ncci_state[ncci], a->ncci_plci[ncci], plci->Id, plci->State)); msg = _CONNECT_B3_I; if (a->ncci_state[ncci] == IDLE) plci->channels++; else if (plci->B3_prot == 1) msg = _CONNECT_B3_T90_ACTIVE_I; a->ncci_state[ncci] = INC_CON_PENDING; if(plci->B3_prot == 4) sendf(plci->appl,msg,Id,0,"s",""); else sendf(plci->appl,msg,Id,0,"S",plci->ncpi_buffer); break; case N_CONNECT_ACK: dbug(1,dprintf("N_connect_Ack")); if (plci->internal_command_queue[0] && ((plci->adjust_b_state == ADJUST_B_CONNECT_2) || (plci->adjust_b_state == ADJUST_B_CONNECT_3) || (plci->adjust_b_state == ADJUST_B_CONNECT_4))) { (*(plci->internal_command_queue[0]))(Id, plci, 0); if (!plci->internal_command) next_internal_command (Id, plci); break; } msg = _CONNECT_B3_ACTIVE_I; if (plci->B3_prot == 1) { if (a->ncci_state[ncci] != OUTG_CON_PENDING) msg = _CONNECT_B3_T90_ACTIVE_I; a->ncci_state[ncci] = INC_ACT_PENDING; sendf(plci->appl,msg,Id,0,"S",plci->ncpi_buffer); } else if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7)) { if ((a->ncci_state[ncci] == OUTG_CON_PENDING) && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT) && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT)) { 
a->ncci_state[ncci] = INC_ACT_PENDING; if (plci->B3_prot == 4) sendf(plci->appl,msg,Id,0,"s",""); else sendf(plci->appl,msg,Id,0,"S",plci->ncpi_buffer); plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT; } } else { a->ncci_state[ncci] = INC_ACT_PENDING; sendf(plci->appl,msg,Id,0,"S",plci->ncpi_buffer); } if (plci->adjust_b_restore) { plci->adjust_b_restore = false; start_internal_command (Id, plci, adjust_b_restore); } break; case N_DISC: case N_DISC_ACK: if (plci->internal_command_queue[0] && ((plci->internal_command == FAX_DISCONNECT_COMMAND_1) || (plci->internal_command == FAX_DISCONNECT_COMMAND_2) || (plci->internal_command == FAX_DISCONNECT_COMMAND_3))) { (*(plci->internal_command_queue[0]))(Id, plci, 0); if (!plci->internal_command) next_internal_command (Id, plci); } ncci_state = a->ncci_state[ncci]; ncci_remove (plci, ncci, false); /* with N_DISC or N_DISC_ACK the IDI frees the respective */ /* channel, so we cannot store the state in ncci_state! The */ /* information which channel we received a N_DISC is thus */ /* stored in the inc_dis_ncci_table buffer. 
*/ for(i=0; plci->inc_dis_ncci_table[i]; i++); plci->inc_dis_ncci_table[i] = (byte) ncci; /* need a connect_b3_ind before a disconnect_b3_ind with FAX */ if (!plci->channels && (plci->B1_resource == 16) && (plci->State <= CONNECTED)) { len = 9; i = ((T30_INFO *)plci->fax_connect_info_buffer)->rate_div_2400 * 2400; PUT_WORD (&plci->ncpi_buffer[1], i); PUT_WORD (&plci->ncpi_buffer[3], 0); i = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format; PUT_WORD (&plci->ncpi_buffer[5], i); PUT_WORD (&plci->ncpi_buffer[7], 0); plci->ncpi_buffer[len] = 0; plci->ncpi_buffer[0] = len; if(plci->B3_prot == 4) sendf(plci->appl,_CONNECT_B3_I,Id,0,"s",""); else { if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id-1]) & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD))) { plci->ncpi_buffer[++len] = 0; plci->ncpi_buffer[++len] = 0; plci->ncpi_buffer[++len] = 0; plci->ncpi_buffer[0] = len; } sendf(plci->appl,_CONNECT_B3_I,Id,0,"S",plci->ncpi_buffer); } sendf(plci->appl,_DISCONNECT_B3_I,Id,0,"wS",info,plci->ncpi_buffer); plci->ncpi_state = 0; sig_req(plci,HANGUP,0); send_req(plci); plci->State = OUTG_DIS_PENDING; /* disc here */ } else if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS) && ((plci->B3_prot == 4) || (plci->B3_prot == 5)) && ((ncci_state == INC_DIS_PENDING) || (ncci_state == IDLE))) { if (ncci_state == IDLE) { if (plci->channels) plci->channels--; if((plci->State==IDLE || plci->State==SUSPENDING) && !plci->channels){ if(plci->State == SUSPENDING){ sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", (word)3, "\x03\x04\x00\x00"); sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0); } plci_remove(plci); plci->State=IDLE; } } } else if (plci->channels) { sendf(plci->appl,_DISCONNECT_B3_I,Id,0,"wS",info,plci->ncpi_buffer); plci->ncpi_state = 0; if ((ncci_state == OUTG_REJ_PENDING) && ((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != 
B3_X25_DCE))) { sig_req(plci,HANGUP,0); send_req(plci); plci->State = OUTG_DIS_PENDING; } } break; case N_RESET: a->ncci_state[ncci] = INC_RES_PENDING; sendf(plci->appl,_RESET_B3_I,Id,0,"S",plci->ncpi_buffer); break; case N_RESET_ACK: a->ncci_state[ncci] = CONNECTED; sendf(plci->appl,_RESET_B3_I,Id,0,"S",plci->ncpi_buffer); break; case N_UDATA: if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f)))) { plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3); plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE; plci->NL.R = plci->RData; plci->NL.RNum = 1; return; } case N_BDATA: case N_DATA: if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */ || (a->ncci_state[ncci] == IDLE) || (a->ncci_state[ncci] == INC_DIS_PENDING)) { plci->NL.RNR = 2; break; } if ((a->ncci_state[ncci] != CONNECTED) && (a->ncci_state[ncci] != OUTG_DIS_PENDING) && (a->ncci_state[ncci] != OUTG_REJ_PENDING)) { dbug(1,dprintf("flow control")); plci->NL.RNR = 1; /* flow control */ channel_x_off (plci, ch, 0); break; } NCCIcode = ncci | (((word)a->Id) << 8); /* count all buffers within the Application pool */ /* belonging to the same NCCI. If this is below the */ /* number of buffers available per NCCI we accept */ /* this packet, otherwise we reject it */ count = 0; Num = 0xffff; for(i=0; i<APPLptr->MaxBuffer; i++) { if(NCCIcode==APPLptr->DataNCCI[i]) count++; if(!APPLptr->DataNCCI[i] && Num==0xffff) Num = i; } if(count>=APPLptr->MaxNCCIData || Num==0xffff) { dbug(3,dprintf("Flow-Control")); plci->NL.RNR = 1; if( ++(APPLptr->NCCIDataFlowCtrlTimer)>= (word)((a->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) ? 
40 : 2000)) { plci->NL.RNR = 2; dbug(3,dprintf("DiscardData")); } else { channel_x_off (plci, ch, 0); } break; } else { APPLptr->NCCIDataFlowCtrlTimer = 0; } plci->RData[0].P = ReceiveBufferGet(APPLptr,Num); if(!plci->RData[0].P) { plci->NL.RNR = 1; channel_x_off (plci, ch, 0); break; } APPLptr->DataNCCI[Num] = NCCIcode; APPLptr->DataFlags[Num] = (plci->Id<<8) | (plci->NL.Ind>>4); dbug(3,dprintf("Buffer(%d), Max = %d",Num,APPLptr->MaxBuffer)); plci->RNum = Num; plci->RFlags = plci->NL.Ind>>4; plci->RData[0].PLength = APPLptr->MaxDataLength; plci->NL.R = plci->RData; if ((plci->NL.RLength != 0) && ((plci->B2_prot == B2_V120_ASYNC) || (plci->B2_prot == B2_V120_ASYNC_V42BIS) || (plci->B2_prot == B2_V120_BIT_TRANSPARENT))) { plci->RData[1].P = plci->RData[0].P; plci->RData[1].PLength = plci->RData[0].PLength; plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3); if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1)) plci->RData[0].PLength = 1; else plci->RData[0].PLength = 2; if (plci->NL.RBuffer->P[0] & V120_HEADER_BREAK_BIT) plci->RFlags |= 0x0010; if (plci->NL.RBuffer->P[0] & (V120_HEADER_C1_BIT | V120_HEADER_C2_BIT)) plci->RFlags |= 0x8000; plci->NL.RNum = 2; } else { if((plci->NL.Ind &0x0f)==N_UDATA) plci->RFlags |= 0x0010; else if ((plci->B3_prot == B3_RTP) && ((plci->NL.Ind & 0x0f) == N_BDATA)) plci->RFlags |= 0x0001; plci->NL.RNum = 1; } break; case N_DATA_ACK: data_ack (plci, ch); break; default: plci->NL.RNR = 2; break; } } /*------------------------------------------------------------------*/ /* find a free PLCI */ /*------------------------------------------------------------------*/ static word get_plci(DIVA_CAPI_ADAPTER *a) { word i,j; PLCI * plci; dump_plcis (a); for(i=0;i<a->max_plci && a->plci[i].Id;i++); if(i==a->max_plci) { dbug(1,dprintf("get_plci: out of PLCIs")); return 0; } plci = &a->plci[i]; plci->Id = (byte)(i+1); plci->Sig.Id = 0; plci->NL.Id = 0; plci->sig_req = 0; plci->nl_req = 0; 
/* NOTE(review): continuation of get_plci() — reset every per-call field of
   the freshly claimed PLCI to its idle/default value before handing it out. */
plci->appl = NULL;
plci->relatedPTYPLCI = NULL;
plci->State = IDLE;
plci->SuppState = IDLE;
plci->channels = 0;
plci->tel = 0;
plci->B1_resource = 0;
plci->B2_prot = 0;
plci->B3_prot = 0;
plci->command = 0;
plci->m_command = 0;
init_internal_command_queue (plci);
plci->number = 0;
plci->req_in_start = 0;
plci->req_in = 0;
plci->req_out = 0;
/* message queue positions start at MSG_IN_QUEUE_SIZE, i.e. "empty" */
plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
plci->data_sent = false;
plci->send_disc = 0;
plci->sig_global_req = 0;
plci->sig_remove_id = 0;
plci->nl_global_req = 0;
plci->nl_remove_id = 0;
plci->adv_nl = 0;
plci->manufacturer = false;
plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
plci->spoofed_msg = 0;
plci->ptyState = 0;
plci->cr_enquiry = false;
plci->hangup_flow_ctrl_timer = 0;
plci->ncci_ring_list = 0;
for(j=0;j<MAX_CHANNELS_PER_PLCI;j++) plci->inc_dis_ncci_table[j] = 0;
clear_c_ind_mask (plci);
set_group_ind_mask (plci);
plci->fax_connect_info_length = 0;
plci->nsf_control_bits = 0;
plci->ncpi_state = 0x00;
plci->ncpi_buffer[0] = 0;
plci->requested_options_conn = 0;
plci->requested_options = 0;
plci->notifiedcall = 0;
plci->vswitchstate = 0;
plci->vsprot = 0;
plci->vsprotdialect = 0;
init_b1_config (plci);
dbug(1,dprintf("get_plci(%x)",plci->Id));
/* return the 1-based PLCI index (0 is reserved for "no PLCI") */
return i+1;
}

/*------------------------------------------------------------------*/
/* put a parameter in the parameter buffer                          */
/*------------------------------------------------------------------*/

/* Append a length-prefixed parameter (p[0] is the length) as an IE with
   the given code; a NULL p yields a zero-length IE. */
static void add_p(PLCI * plci, byte code, byte * p)
{
  word p_length;
  p_length = 0;
  if(p) p_length = p[0];
  add_ie(plci, code, p, p_length);
}

/*------------------------------------------------------------------*/
/* put a structure in the parameter buffer                          */
/*------------------------------------------------------------------*/

/* Append a parsed API structure (API_PARSE) as an IE with the given code;
   silently does nothing when p is NULL. */
static void add_s(PLCI * plci, byte code, API_PARSE * p)
{
  if(p) add_ie(plci, code, p->info, (word)p->length);
}
/*------------------------------------------------------------------*/
/* put multiple structures in the parameter buffer                  */
/*------------------------------------------------------------------*/

/* Walk a compound API_PARSE structure containing a sequence of
   (code, length, data...) sub-elements and append each one as an IE.
   Layout assumed: p->info[i-1] = IE code, p->info[i] = IE length,
   payload follows — the loop advances by length+2 per element. */
static void add_ss(PLCI * plci, byte code, API_PARSE * p)
{
  byte i;
  if(p){
    dbug(1,dprintf("add_ss(%x,len=%d)",code,p->length));
    for(i=2;i<(byte)p->length;i+=p->info[i]+2){
      dbug(1,dprintf("add_ss_ie(%x,len=%d)",p->info[i-1],p->info[i]));
      add_ie(plci, p->info[i-1], (byte *)&(p->info[i]), (word)p->info[i]);
    }
  }
}

/*------------------------------------------------------------------*/
/* return the channel number sent by the application in a esc_chi   */
/*------------------------------------------------------------------*/

/* Scan the sub-elements of p for an ESC/CHI pair of length 2 and return
   the channel byte it carries; 0 when absent or p is NULL. */
static byte getChannel(API_PARSE * p)
{
  byte i;
  if(p){
    for(i=2;i<(byte)p->length;i+=p->info[i]+2){
      if(p->info[i]==2){
        if(p->info[i-1]==ESC && p->info[i+1]==CHI) return (p->info[i+2]);
      }
    }
  }
  return 0;
}

/*------------------------------------------------------------------*/
/* put an information element in the parameter buffer               */
/*------------------------------------------------------------------*/

/* Append one IE (code, length, payload) to plci->RBuffer.
   Zero-length IEs are dropped unless the code has bit 7 set.
   The req_in adjustment: on the first IE of a request, skip 2 header
   bytes; on subsequent IEs, back up over the trailing 0 terminator
   written by the previous call. Payload is copied from p[1..p_length],
   i.e. p[0] (the length byte) is skipped. A trailing 0 terminates the
   buffer after every call. */
static void add_ie(PLCI * plci, byte code, byte * p, word p_length)
{
  word i;

  if(!(code &0x80) && !p_length) return;

  if(plci->req_in==plci->req_in_start) {
    plci->req_in +=2;
  }
  else {
    plci->req_in--;  /* overwrite the previous terminating 0 */
  }
  plci->RBuffer[plci->req_in++] = code;

  if(p) {
    plci->RBuffer[plci->req_in++] = (byte)p_length;
    for(i=0;i<p_length;i++) plci->RBuffer[plci->req_in++] = p[1+i];
  }
  plci->RBuffer[plci->req_in++] = 0;
}

/*------------------------------------------------------------------*/
/* put a unstructured data into the buffer                          */
/*------------------------------------------------------------------*/

/* Append raw bytes to plci->RBuffer with the same req_in header/terminator
   adjustment as add_ie(), but with no code/length framing and no trailing 0. */
static void add_d(PLCI *plci, word length, byte *p)
{
  word i;

  if(plci->req_in==plci->req_in_start) {
    plci->req_in +=2;
  }
  else {
    plci->req_in--;
  }
  for(i=0;i<length;i++) plci->RBuffer[plci->req_in++] = p[i];
}

/*------------------------------------------------------------------*/
/* put 
parameters from the Additional Info parameter in the */ /* parameter buffer */ /*------------------------------------------------------------------*/ static void add_ai(PLCI *plci, API_PARSE *ai) { word i; API_PARSE ai_parms[5]; for(i=0;i<5;i++) ai_parms[i].length = 0; if(!ai->length) return; if(api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms)) return; add_s (plci,KEY,&ai_parms[1]); add_s (plci,UUI,&ai_parms[2]); add_ss(plci,FTY,&ai_parms[3]); } /*------------------------------------------------------------------*/ /* put parameter for b1 protocol in the parameter buffer */ /*------------------------------------------------------------------*/ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info, word b1_facilities) { API_PARSE bp_parms[8]; API_PARSE mdm_cfg[9]; API_PARSE global_config[2]; byte cai[256]; byte resource[] = {5,9,13,12,16,39,9,17,17,18}; byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08"; word i; API_PARSE mdm_cfg_v18[4]; word j, n, w; dword d; for(i=0;i<8;i++) bp_parms[i].length = 0; for(i=0;i<2;i++) global_config[i].length = 0; dbug(1,dprintf("add_b1")); api_save_msg(bp, "s", &plci->B_protocol); if(b_channel_info==2){ plci->B1_resource = 0; adjust_b1_facilities (plci, plci->B1_resource, b1_facilities); add_p(plci, CAI, "\x01\x00"); dbug(1,dprintf("Cai=1,0 (no resource)")); return 0; } if(plci->tel == CODEC_PERMANENT) return 0; else if(plci->tel == CODEC){ plci->B1_resource = 1; adjust_b1_facilities (plci, plci->B1_resource, b1_facilities); add_p(plci, CAI, "\x01\x01"); dbug(1,dprintf("Cai=1,1 (Codec)")); return 0; } else if(plci->tel == ADV_VOICE){ plci->B1_resource = add_b1_facilities (plci, 9, (word)(b1_facilities | B1_FACILITY_VOICE)); adjust_b1_facilities (plci, plci->B1_resource, (word)(b1_facilities | B1_FACILITY_VOICE)); voice_cai[1] = plci->B1_resource; PUT_WORD (&voice_cai[5], plci->appl->MaxDataLength); add_p(plci, CAI, voice_cai); dbug(1,dprintf("Cai=1,0x%x (AdvVoice)",voice_cai[1])); return 0; } plci->call_dir &= 
~(CALL_DIR_ORIGINATE | CALL_DIR_ANSWER); if (plci->call_dir & CALL_DIR_OUT) plci->call_dir |= CALL_DIR_ORIGINATE; else if (plci->call_dir & CALL_DIR_IN) plci->call_dir |= CALL_DIR_ANSWER; if(!bp->length){ plci->B1_resource = 0x5; adjust_b1_facilities (plci, plci->B1_resource, b1_facilities); add_p(plci, CAI, "\x01\x05"); return 0; } dbug(1,dprintf("b_prot_len=%d",(word)bp->length)); if(bp->length>256) return _WRONG_MESSAGE_FORMAT; if(api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms)) { bp_parms[6].length = 0; if(api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms)) { dbug(1,dprintf("b-form.!")); return _WRONG_MESSAGE_FORMAT; } } else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms)) { dbug(1,dprintf("b-form.!")); return _WRONG_MESSAGE_FORMAT; } if(bp_parms[6].length) { if(api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config)) { return _WRONG_MESSAGE_FORMAT; } switch(GET_WORD(global_config[0].info)) { case 1: plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE; break; case 2: plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER; break; } } dbug(1,dprintf("call_dir=%04x", plci->call_dir)); if ((GET_WORD(bp_parms[0].info) == B1_RTP) && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP))) { plci->B1_resource = add_b1_facilities (plci, 31, (word)(b1_facilities & ~B1_FACILITY_VOICE)); adjust_b1_facilities (plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE)); cai[1] = plci->B1_resource; cai[2] = 0; cai[3] = 0; cai[4] = 0; PUT_WORD(&cai[5],plci->appl->MaxDataLength); for (i = 0; i < bp_parms[3].length; i++) cai[7+i] = bp_parms[3].info[1+i]; cai[0] = 6 + bp_parms[3].length; add_p(plci, CAI, cai); return 0; } if ((GET_WORD(bp_parms[0].info) == B1_PIAFS) && (plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS))) { plci->B1_resource = add_b1_facilities (plci, 35/* PIAFS HARDWARE FACILITY */, (word)(b1_facilities & 
~B1_FACILITY_VOICE)); adjust_b1_facilities (plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE)); cai[1] = plci->B1_resource; cai[2] = 0; cai[3] = 0; cai[4] = 0; PUT_WORD(&cai[5],plci->appl->MaxDataLength); cai[0] = 6; add_p(plci, CAI, cai); return 0; } if ((GET_WORD(bp_parms[0].info) >= 32) || (!((1L << GET_WORD(bp_parms[0].info)) & plci->adapter->profile.B1_Protocols) && ((GET_WORD(bp_parms[0].info) != 3) || !((1L << B1_HDLC) & plci->adapter->profile.B1_Protocols) || ((bp_parms[3].length != 0) && (GET_WORD(&bp_parms[3].info[1]) != 0) && (GET_WORD(&bp_parms[3].info[1]) != 56000))))) { return _B1_NOT_SUPPORTED; } plci->B1_resource = add_b1_facilities (plci, resource[GET_WORD(bp_parms[0].info)], (word)(b1_facilities & ~B1_FACILITY_VOICE)); adjust_b1_facilities (plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE)); cai[0] = 6; cai[1] = plci->B1_resource; for (i=2;i<sizeof(cai);i++) cai[i] = 0; if ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE) || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC) || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC)) { /* B1 - modem */ for (i=0;i<7;i++) mdm_cfg[i].length = 0; if (bp_parms[3].length) { if(api_parse(&bp_parms[3].info[1],(word)bp_parms[3].length,"wwwwww", mdm_cfg)) { return (_WRONG_MESSAGE_FORMAT); } cai[2] = 0; /* Bit rate for adaptation */ dbug(1,dprintf("MDM Max Bit Rate:<%d>", GET_WORD(mdm_cfg[0].info))); PUT_WORD (&cai[13], 0); /* Min Tx speed */ PUT_WORD (&cai[15], GET_WORD(mdm_cfg[0].info)); /* Max Tx speed */ PUT_WORD (&cai[17], 0); /* Min Rx speed */ PUT_WORD (&cai[19], GET_WORD(mdm_cfg[0].info)); /* Max Rx speed */ cai[3] = 0; /* Async framing parameters */ switch (GET_WORD (mdm_cfg[2].info)) { /* Parity */ case 1: /* odd parity */ cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD); dbug(1,dprintf("MDM: odd parity")); break; case 2: /* even parity */ cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN); dbug(1,dprintf("MDM: even parity")); 
break; default: dbug(1,dprintf("MDM: no parity")); break; } switch (GET_WORD (mdm_cfg[3].info)) { /* stop bits */ case 1: /* 2 stop bits */ cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS; dbug(1,dprintf("MDM: 2 stop bits")); break; default: dbug(1,dprintf("MDM: 1 stop bit")); break; } switch (GET_WORD (mdm_cfg[1].info)) { /* char length */ case 5: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5; dbug(1,dprintf("MDM: 5 bits")); break; case 6: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6; dbug(1,dprintf("MDM: 6 bits")); break; case 7: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7; dbug(1,dprintf("MDM: 7 bits")); break; default: dbug(1,dprintf("MDM: 8 bits")); break; } cai[7] = 0; /* Line taking options */ cai[8] = 0; /* Modulation negotiation options */ cai[9] = 0; /* Modulation options */ if (((plci->call_dir & CALL_DIR_ORIGINATE) != 0) ^ ((plci->call_dir & CALL_DIR_OUT) != 0)) { cai[9] |= DSP_CAI_MODEM_REVERSE_DIRECTION; dbug(1, dprintf("MDM: Reverse direction")); } if (GET_WORD (mdm_cfg[4].info) & MDM_CAPI_DISABLE_RETRAIN) { cai[9] |= DSP_CAI_MODEM_DISABLE_RETRAIN; dbug(1, dprintf("MDM: Disable retrain")); } if (GET_WORD (mdm_cfg[4].info) & MDM_CAPI_DISABLE_RING_TONE) { cai[7] |= DSP_CAI_MODEM_DISABLE_CALLING_TONE | DSP_CAI_MODEM_DISABLE_ANSWER_TONE; dbug(1, dprintf("MDM: Disable ring tone")); } if (GET_WORD (mdm_cfg[4].info) & MDM_CAPI_GUARD_1800) { cai[8] |= DSP_CAI_MODEM_GUARD_TONE_1800HZ; dbug(1, dprintf("MDM: 1800 guard tone")); } else if (GET_WORD (mdm_cfg[4].info) & MDM_CAPI_GUARD_550 ) { cai[8] |= DSP_CAI_MODEM_GUARD_TONE_550HZ; dbug(1, dprintf("MDM: 550 guard tone")); } if ((GET_WORD (mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_V100) { cai[8] |= DSP_CAI_MODEM_NEGOTIATE_V100; dbug(1, dprintf("MDM: V100")); } else if ((GET_WORD (mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_MOD_CLASS) { cai[8] |= DSP_CAI_MODEM_NEGOTIATE_IN_CLASS; dbug(1, dprintf("MDM: IN CLASS")); } else if ((GET_WORD (mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_DISABLED) { cai[8] |= DSP_CAI_MODEM_NEGOTIATE_DISABLED; dbug(1, 
dprintf("MDM: DISABLED")); } cai[0] = 20; if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_V18)) && (GET_WORD(mdm_cfg[5].info) & 0x8000)) /* Private V.18 enable */ { plci->requested_options |= 1L << PRIVATE_V18; } if (GET_WORD(mdm_cfg[5].info) & 0x4000) /* Private VOWN enable */ plci->requested_options |= 1L << PRIVATE_VOWN; if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id-1]) & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN))) { if (!api_parse(&bp_parms[3].info[1],(word)bp_parms[3].length,"wwwwwws", mdm_cfg)) { i = 27; if (mdm_cfg[6].length >= 4) { d = GET_DWORD(&mdm_cfg[6].info[1]); cai[7] |= (byte) d; /* line taking options */ cai[9] |= (byte)(d >> 8); /* modulation options */ cai[++i] = (byte)(d >> 16); /* vown modulation options */ cai[++i] = (byte)(d >> 24); if (mdm_cfg[6].length >= 8) { d = GET_DWORD(&mdm_cfg[6].info[5]); cai[10] |= (byte) d; /* disabled modulations mask */ cai[11] |= (byte)(d >> 8); if (mdm_cfg[6].length >= 12) { d = GET_DWORD(&mdm_cfg[6].info[9]); cai[12] = (byte) d; /* enabled modulations mask */ cai[++i] = (byte)(d >> 8); /* vown enabled modulations */ cai[++i] = (byte)(d >> 16); cai[++i] = (byte)(d >> 24); cai[++i] = 0; if (mdm_cfg[6].length >= 14) { w = GET_WORD(&mdm_cfg[6].info[13]); if (w != 0) PUT_WORD(&cai[13], w); /* min tx speed */ if (mdm_cfg[6].length >= 16) { w = GET_WORD(&mdm_cfg[6].info[15]); if (w != 0) PUT_WORD(&cai[15], w); /* max tx speed */ if (mdm_cfg[6].length >= 18) { w = GET_WORD(&mdm_cfg[6].info[17]); if (w != 0) PUT_WORD(&cai[17], w); /* min rx speed */ if (mdm_cfg[6].length >= 20) { w = GET_WORD(&mdm_cfg[6].info[19]); if (w != 0) PUT_WORD(&cai[19], w); /* max rx speed */ if (mdm_cfg[6].length >= 22) { w = GET_WORD(&mdm_cfg[6].info[21]); cai[23] = (byte)(-((short) w)); /* transmit level */ if (mdm_cfg[6].length >= 24) { w = GET_WORD(&mdm_cfg[6].info[23]); cai[22] |= (byte) w; /* info options mask */ cai[21] |= (byte)(w >> 8); /* 
disabled symbol rates */ } } } } } } } } } cai[27] = i - 27; i++; if (!api_parse(&bp_parms[3].info[1],(word)bp_parms[3].length,"wwwwwwss", mdm_cfg)) { if (!api_parse(&mdm_cfg[7].info[1],(word)mdm_cfg[7].length,"sss", mdm_cfg_v18)) { for (n = 0; n < 3; n++) { cai[i] = (byte)(mdm_cfg_v18[n].length); for (j = 1; j < ((word)(cai[i] + 1)); j++) cai[i+j] = mdm_cfg_v18[n].info[j]; i += cai[i] + 1; } } } cai[0] = (byte)(i - 1); } } } } if(GET_WORD(bp_parms[0].info)==2 || /* V.110 async */ GET_WORD(bp_parms[0].info)==3 ) /* V.110 sync */ { if(bp_parms[3].length){ dbug(1,dprintf("V.110,%d",GET_WORD(&bp_parms[3].info[1]))); switch(GET_WORD(&bp_parms[3].info[1])){ /* Rate */ case 0: case 56000: if(GET_WORD(bp_parms[0].info)==3){ /* V.110 sync 56k */ dbug(1,dprintf("56k sync HSCX")); cai[1] = 8; cai[2] = 0; cai[3] = 0; } else if(GET_WORD(bp_parms[0].info)==2){ dbug(1,dprintf("56k async DSP")); cai[2] = 9; } break; case 50: cai[2] = 1; break; case 75: cai[2] = 1; break; case 110: cai[2] = 1; break; case 150: cai[2] = 1; break; case 200: cai[2] = 1; break; case 300: cai[2] = 1; break; case 600: cai[2] = 1; break; case 1200: cai[2] = 2; break; case 2400: cai[2] = 3; break; case 4800: cai[2] = 4; break; case 7200: cai[2] = 10; break; case 9600: cai[2] = 5; break; case 12000: cai[2] = 13; break; case 24000: cai[2] = 0; break; case 14400: cai[2] = 11; break; case 19200: cai[2] = 6; break; case 28800: cai[2] = 12; break; case 38400: cai[2] = 7; break; case 48000: cai[2] = 8; break; case 76: cai[2] = 15; break; /* 75/1200 */ case 1201: cai[2] = 14; break; /* 1200/75 */ case 56001: cai[2] = 9; break; /* V.110 56000 */ default: return _B1_PARM_NOT_SUPPORTED; } cai[3] = 0; if (cai[1] == 13) /* v.110 async */ { if (bp_parms[3].length >= 8) { switch (GET_WORD (&bp_parms[3].info[3])) { /* char length */ case 5: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5; break; case 6: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6; break; case 7: cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7; break; } switch (GET_WORD 
(&bp_parms[3].info[5])) { /* Parity */ case 1: /* odd parity */ cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD); break; case 2: /* even parity */ cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN); break; } switch (GET_WORD (&bp_parms[3].info[7])) { /* stop bits */ case 1: /* 2 stop bits */ cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS; break; } } } } else if(cai[1]==8 || GET_WORD(bp_parms[0].info)==3 ){ dbug(1,dprintf("V.110 default 56k sync")); cai[1] = 8; cai[2] = 0; cai[3] = 0; } else { dbug(1,dprintf("V.110 default 9600 async")); cai[2] = 5; } } PUT_WORD(&cai[5],plci->appl->MaxDataLength); dbug(1,dprintf("CAI[%d]=%x,%x,%x,%x,%x,%x", cai[0], cai[1], cai[2], cai[3], cai[4], cai[5], cai[6])); /* HexDump ("CAI", sizeof(cai), &cai[0]); */ add_p(plci, CAI, cai); return 0; } /*------------------------------------------------------------------*/ /* put parameter for b2 and B3 protocol in the parameter buffer */ /*------------------------------------------------------------------*/ static word add_b23(PLCI *plci, API_PARSE *bp) { word i, fax_control_bits; byte pos, len; byte SAPI = 0x40; /* default SAPI 16 for x.31 */ API_PARSE bp_parms[8]; API_PARSE * b1_config; API_PARSE * b2_config; API_PARSE b2_config_parms[8]; API_PARSE * b3_config; API_PARSE b3_config_parms[6]; API_PARSE global_config[2]; static byte llc[3] = {2,0,0}; static byte dlc[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; static byte nlc[256]; static byte lli[12] = {1,1}; const byte llc2_out[] = {1,2,4,6,2,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6}; const byte llc2_in[] = {1,3,4,6,3,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6}; const byte llc3[] = {4,3,2,2,6,6,0}; const byte header[] = {0,2,3,3,0,0,0}; for(i=0;i<8;i++) bp_parms[i].length = 0; for(i=0;i<6;i++) b2_config_parms[i].length = 0; for(i=0;i<5;i++) b3_config_parms[i].length = 0; lli[0] = 1; lli[1] = 1; if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) lli[1] |= 2; if 
(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) lli[1] |= 4; if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) { lli[1] |= 0x10; if (plci->rx_dma_descriptor <= 0) { plci->rx_dma_descriptor=diva_get_dma_descriptor(plci,&plci->rx_dma_magic); if (plci->rx_dma_descriptor >= 0) plci->rx_dma_descriptor++; } if (plci->rx_dma_descriptor > 0) { lli[0] = 6; lli[1] |= 0x40; lli[2] = (byte)(plci->rx_dma_descriptor - 1); lli[3] = (byte)plci->rx_dma_magic; lli[4] = (byte)(plci->rx_dma_magic >> 8); lli[5] = (byte)(plci->rx_dma_magic >> 16); lli[6] = (byte)(plci->rx_dma_magic >> 24); } } if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) { lli[1] |= 0x20; } dbug(1,dprintf("add_b23")); api_save_msg(bp, "s", &plci->B_protocol); if(!bp->length && plci->tel) { plci->adv_nl = true; dbug(1,dprintf("Default adv.Nl")); add_p(plci,LLI,lli); plci->B2_prot = 1 /*XPARENT*/; plci->B3_prot = 0 /*XPARENT*/; llc[1] = 2; llc[2] = 4; add_p(plci, LLC, llc); dlc[0] = 2; PUT_WORD(&dlc[1],plci->appl->MaxDataLength); add_p(plci, DLC, dlc); return 0; } if(!bp->length) /*default*/ { dbug(1,dprintf("ret default")); add_p(plci,LLI,lli); plci->B2_prot = 0 /*X.75 */; plci->B3_prot = 0 /*XPARENT*/; llc[1] = 1; llc[2] = 4; add_p(plci, LLC, llc); dlc[0] = 2; PUT_WORD(&dlc[1],plci->appl->MaxDataLength); add_p(plci, DLC, dlc); return 0; } dbug(1,dprintf("b_prot_len=%d",(word)bp->length)); if((word)bp->length > 256) return _WRONG_MESSAGE_FORMAT; if(api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms)) { bp_parms[6].length = 0; if(api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms)) { dbug(1,dprintf("b-form.!")); return _WRONG_MESSAGE_FORMAT; } } else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms)) { dbug(1,dprintf("b-form.!")); return _WRONG_MESSAGE_FORMAT; } if(plci->tel==ADV_VOICE) /* transparent B on advanced voice */ { if(GET_WORD(bp_parms[1].info)!=1 || GET_WORD(bp_parms[2].info)!=0) return _B2_NOT_SUPPORTED; plci->adv_nl = 
true; } else if(plci->tel) return _B2_NOT_SUPPORTED; if ((GET_WORD(bp_parms[1].info) == B2_RTP) && (GET_WORD(bp_parms[2].info) == B3_RTP) && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP))) { add_p(plci,LLI,lli); plci->B2_prot = (byte) GET_WORD(bp_parms[1].info); plci->B3_prot = (byte) GET_WORD(bp_parms[2].info); llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? 14 : 13; llc[2] = 4; add_p(plci, LLC, llc); dlc[0] = 2; PUT_WORD(&dlc[1],plci->appl->MaxDataLength); dlc[3] = 3; /* Addr A */ dlc[4] = 1; /* Addr B */ dlc[5] = 7; /* modulo mode */ dlc[6] = 7; /* window size */ dlc[7] = 0; /* XID len Lo */ dlc[8] = 0; /* XID len Hi */ for (i = 0; i < bp_parms[4].length; i++) dlc[9+i] = bp_parms[4].info[1+i]; dlc[0] = (byte)(8 + bp_parms[4].length); add_p(plci, DLC, dlc); for (i = 0; i < bp_parms[5].length; i++) nlc[1+i] = bp_parms[5].info[1+i]; nlc[0] = (byte)(bp_parms[5].length); add_p(plci, NLC, nlc); return 0; } if ((GET_WORD(bp_parms[1].info) >= 32) || (!((1L << GET_WORD(bp_parms[1].info)) & plci->adapter->profile.B2_Protocols) && ((GET_WORD(bp_parms[1].info) != B2_PIAFS) || !(plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS))))) { return _B2_NOT_SUPPORTED; } if ((GET_WORD(bp_parms[2].info) >= 32) || !((1L << GET_WORD(bp_parms[2].info)) & plci->adapter->profile.B3_Protocols)) { return _B3_NOT_SUPPORTED; } if ((GET_WORD(bp_parms[1].info) != B2_SDLC) && ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE) || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC) || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC))) { return (add_modem_b23 (plci, bp_parms)); } add_p(plci,LLI,lli); plci->B2_prot = (byte) GET_WORD(bp_parms[1].info); plci->B3_prot = (byte) GET_WORD(bp_parms[2].info); if(plci->B2_prot==12) SAPI = 0; /* default SAPI D-channel */ if(bp_parms[6].length) { if(api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config)) { return _WRONG_MESSAGE_FORMAT; } 
switch(GET_WORD(global_config[0].info)) { case 1: plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE; break; case 2: plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER; break; } } dbug(1,dprintf("call_dir=%04x", plci->call_dir)); if (plci->B2_prot == B2_PIAFS) llc[1] = PIAFS_CRC; else /* IMPLEMENT_PIAFS */ { llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? llc2_out[GET_WORD(bp_parms[1].info)] : llc2_in[GET_WORD(bp_parms[1].info)]; } llc[2] = llc3[GET_WORD(bp_parms[2].info)]; add_p(plci, LLC, llc); dlc[0] = 2; PUT_WORD(&dlc[1], plci->appl->MaxDataLength + header[GET_WORD(bp_parms[2].info)]); b1_config = &bp_parms[3]; nlc[0] = 0; if(plci->B3_prot == 4 || plci->B3_prot == 5) { for (i=0;i<sizeof(T30_INFO);i++) nlc[i] = 0; nlc[0] = sizeof(T30_INFO); if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS) ((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI; ((T30_INFO *)&nlc[1])->rate_div_2400 = 0xff; if(b1_config->length>=2) { ((T30_INFO *)&nlc[1])->rate_div_2400 = (byte)(GET_WORD(&b1_config->info[1])/2400); } } b2_config = &bp_parms[4]; if (llc[1] == PIAFS_CRC) { if (plci->B3_prot != B3_TRANSPARENT) { return _B_STACK_NOT_SUPPORTED; } if(b2_config->length && api_parse(&b2_config->info[1], (word)b2_config->length, "bwww", b2_config_parms)) { return _WRONG_MESSAGE_FORMAT; } PUT_WORD(&dlc[1],plci->appl->MaxDataLength); dlc[3] = 0; /* Addr A */ dlc[4] = 0; /* Addr B */ dlc[5] = 0; /* modulo mode */ dlc[6] = 0; /* window size */ if (b2_config->length >= 7){ dlc[ 7] = 7; dlc[ 8] = 0; dlc[ 9] = b2_config_parms[0].info[0]; /* PIAFS protocol Speed configuration */ dlc[10] = b2_config_parms[1].info[0]; /* V.42bis P0 */ dlc[11] = b2_config_parms[1].info[1]; /* V.42bis P0 */ dlc[12] = b2_config_parms[2].info[0]; /* V.42bis P1 */ dlc[13] = b2_config_parms[2].info[1]; /* V.42bis P1 */ dlc[14] = b2_config_parms[3].info[0]; /* V.42bis P2 */ dlc[15] = 
b2_config_parms[3].info[1]; /* V.42bis P2 */ dlc[ 0] = 15; if(b2_config->length >= 8) { /* PIAFS control abilities */ dlc[ 7] = 10; dlc[16] = 2; /* Length of PIAFS extention */ dlc[17] = PIAFS_UDATA_ABILITIES; /* control (UDATA) ability */ dlc[18] = b2_config_parms[4].info[0]; /* value */ dlc[ 0] = 18; } } else /* default values, 64K, variable, no compression */ { dlc[ 7] = 7; dlc[ 8] = 0; dlc[ 9] = 0x03; /* PIAFS protocol Speed configuration */ dlc[10] = 0x03; /* V.42bis P0 */ dlc[11] = 0; /* V.42bis P0 */ dlc[12] = 0; /* V.42bis P1 */ dlc[13] = 0; /* V.42bis P1 */ dlc[14] = 0; /* V.42bis P2 */ dlc[15] = 0; /* V.42bis P2 */ dlc[ 0] = 15; } add_p(plci, DLC, dlc); } else if ((llc[1] == V120_L2) || (llc[1] == V120_V42BIS)) { if (plci->B3_prot != B3_TRANSPARENT) return _B_STACK_NOT_SUPPORTED; dlc[0] = 6; PUT_WORD (&dlc[1], GET_WORD (&dlc[1]) + 2); dlc[3] = 0x08; dlc[4] = 0x01; dlc[5] = 127; dlc[6] = 7; if (b2_config->length != 0) { if((llc[1]==V120_V42BIS) && api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) { return _WRONG_MESSAGE_FORMAT; } dlc[3] = (byte)((b2_config->info[2] << 3) | ((b2_config->info[1] >> 5) & 0x04)); dlc[4] = (byte)((b2_config->info[1] << 1) | 0x01); if (b2_config->info[3] != 128) { dbug(1,dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4])); return _B2_PARM_NOT_SUPPORTED; } dlc[5] = (byte)(b2_config->info[3] - 1); dlc[6] = b2_config->info[4]; if(llc[1]==V120_V42BIS){ if (b2_config->length >= 10){ dlc[ 7] = 6; dlc[ 8] = 0; dlc[ 9] = b2_config_parms[4].info[0]; dlc[10] = b2_config_parms[4].info[1]; dlc[11] = b2_config_parms[5].info[0]; dlc[12] = b2_config_parms[5].info[1]; dlc[13] = b2_config_parms[6].info[0]; dlc[14] = b2_config_parms[6].info[1]; dlc[ 0] = 14; dbug(1,dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1])); dbug(1,dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1])); 
dbug(1,dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1])); } else { dlc[ 6] = 14; } } } } else { if(b2_config->length) { dbug(1,dprintf("B2-Config")); if(llc[1]==X75_V42BIS){ if(api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) { return _WRONG_MESSAGE_FORMAT; } } else { if(api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbs", b2_config_parms)) { return _WRONG_MESSAGE_FORMAT; } } /* if B2 Protocol is LAPD, b2_config structure is different */ if(llc[1]==6) { dlc[0] = 4; if(b2_config->length>=1) dlc[2] = b2_config->info[1]; /* TEI */ else dlc[2] = 0x01; if( (b2_config->length>=2) && (plci->B2_prot==12) ) { SAPI = b2_config->info[2]; /* SAPI */ } dlc[1] = SAPI; if( (b2_config->length>=3) && (b2_config->info[3]==128) ) { dlc[3] = 127; /* Mode */ } else { dlc[3] = 7; /* Mode */ } if(b2_config->length>=4) dlc[4] = b2_config->info[4]; /* Window */ else dlc[4] = 1; dbug(1,dprintf("D-dlc[%d]=%x,%x,%x,%x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4])); if(b2_config->length>5) return _B2_PARM_NOT_SUPPORTED; } else { dlc[0] = (byte)(b2_config_parms[4].length+6); dlc[3] = b2_config->info[1]; dlc[4] = b2_config->info[2]; if(b2_config->info[3]!=8 && b2_config->info[3]!=128){ dbug(1,dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4])); return _B2_PARM_NOT_SUPPORTED; } dlc[5] = (byte)(b2_config->info[3]-1); dlc[6] = b2_config->info[4]; if(dlc[6]>dlc[5]){ dbug(1,dprintf("2D-dlc= %x %x %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4], dlc[5], dlc[6])); return _B2_PARM_NOT_SUPPORTED; } if(llc[1]==X75_V42BIS) { if (b2_config->length >= 10){ dlc[ 7] = 6; dlc[ 8] = 0; dlc[ 9] = b2_config_parms[4].info[0]; dlc[10] = b2_config_parms[4].info[1]; dlc[11] = b2_config_parms[5].info[0]; dlc[12] = b2_config_parms[5].info[1]; dlc[13] = b2_config_parms[6].info[0]; dlc[14] = b2_config_parms[6].info[1]; dlc[ 0] = 14; dbug(1,dprintf("b2_config_parms[4].info[0] [1]: %x %x", 
b2_config_parms[4].info[0], b2_config_parms[4].info[1])); dbug(1,dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1])); dbug(1,dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1])); } else { dlc[ 6] = 14; } } else { PUT_WORD(&dlc[7], (word)b2_config_parms[4].length); for(i=0; i<b2_config_parms[4].length; i++) dlc[11+i] = b2_config_parms[4].info[1+i]; } } } } add_p(plci, DLC, dlc); b3_config = &bp_parms[5]; if(b3_config->length) { if(plci->B3_prot == 4 || plci->B3_prot == 5) { if(api_parse(&b3_config->info[1], (word)b3_config->length, "wwss", b3_config_parms)) { return _WRONG_MESSAGE_FORMAT; } i = GET_WORD((byte *)(b3_config_parms[0].info)); ((T30_INFO *)&nlc[1])->resolution = (byte)(((i & 0x0001) || ((plci->B3_prot == 4) && (((byte)(GET_WORD((byte *)b3_config_parms[1].info))) != 5))) ? T30_RESOLUTION_R8_0770_OR_200 : 0); ((T30_INFO *)&nlc[1])->data_format = (byte)(GET_WORD((byte *)b3_config_parms[1].info)); fax_control_bits = T30_CONTROL_BIT_ALL_FEATURES; if ((((T30_INFO *)&nlc[1])->rate_div_2400 != 0) && (((T30_INFO *)&nlc[1])->rate_div_2400 <= 6)) fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_V34FAX; if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS) { if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id-1]) & (1L << PRIVATE_FAX_PAPER_FORMATS)) { ((T30_INFO *)&nlc[1])->resolution |= T30_RESOLUTION_R8_1540 | T30_RESOLUTION_R16_1540_OR_400 | T30_RESOLUTION_300_300 | T30_RESOLUTION_INCH_BASED | T30_RESOLUTION_METRIC_BASED; } ((T30_INFO *)&nlc[1])->recording_properties = T30_RECORDING_WIDTH_ISO_A3 | (T30_RECORDING_LENGTH_UNLIMITED << 2) | (T30_MIN_SCANLINE_TIME_00_00_00 << 4); } if(plci->B3_prot == 5) { if (i & 0x0002) /* Accept incoming fax-polling requests */ fax_control_bits |= T30_CONTROL_BIT_ACCEPT_POLLING; if (i & 0x2000) /* Do not use MR compression */ 
fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_2D_CODING; if (i & 0x4000) /* Do not use MMR compression */ fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_T6_CODING; if (i & 0x8000) /* Do not use ECM */ fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_ECM; if (plci->fax_connect_info_length != 0) { ((T30_INFO *)&nlc[1])->resolution = ((T30_INFO *)plci->fax_connect_info_buffer)->resolution; ((T30_INFO *)&nlc[1])->data_format = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format; ((T30_INFO *)&nlc[1])->recording_properties = ((T30_INFO *)plci->fax_connect_info_buffer)->recording_properties; fax_control_bits |= GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & (T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS); } } /* copy station id to NLC */ for(i=0; i < T30_MAX_STATION_ID_LENGTH; i++) { if(i<b3_config_parms[2].length) { ((T30_INFO *)&nlc[1])->station_id[i] = ((byte *)b3_config_parms[2].info)[1+i]; } else { ((T30_INFO *)&nlc[1])->station_id[i] = ' '; } } ((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH; /* copy head line to NLC */ if(b3_config_parms[3].length) { pos = (byte)(fax_head_line_time (&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH]))); if (pos != 0) { if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE) pos = 0; else { nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; len = (byte)b3_config_parms[2].length; if (len > 20) len = 20; if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE) { for (i = 0; i < len; i++) nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1+i]; nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' '; } } } 
len = (byte)b3_config_parms[3].length; if (len > CAPI_MAX_HEAD_LINE_SPACE - pos) len = (byte)(CAPI_MAX_HEAD_LINE_SPACE - pos); ((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len); nlc[0] += (byte)(pos + len); for (i = 0; i < len; i++) nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[3].info)[1+i]; } else ((T30_INFO *)&nlc[1])->head_line_len = 0; plci->nsf_control_bits = 0; if(plci->B3_prot == 5) { if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD)) && (GET_WORD((byte *)b3_config_parms[1].info) & 0x8000)) /* Private SUB/SEP/PWD enable */ { plci->requested_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD; } if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD)) && (GET_WORD((byte *)b3_config_parms[1].info) & 0x4000)) /* Private non-standard facilities enable */ { plci->requested_options |= 1L << PRIVATE_FAX_NONSTANDARD; } if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id-1]) & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD))) { if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id-1]) & (1L << PRIVATE_FAX_SUB_SEP_PWD)) { fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD; if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING) fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING; } len = nlc[0]; pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; if (pos < plci->fax_connect_info_length) { for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--) nlc[++len] = plci->fax_connect_info_buffer[pos++]; } else nlc[++len] = 0; if (pos < plci->fax_connect_info_length) { for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--) nlc[++len] = plci->fax_connect_info_buffer[pos++]; } else nlc[++len] = 0; if ((plci->requested_options_conn | plci->requested_options | 
plci->adapter->requested_options_table[plci->appl->Id-1]) & (1L << PRIVATE_FAX_NONSTANDARD)) { if ((pos < plci->fax_connect_info_length) && (plci->fax_connect_info_buffer[pos] != 0)) { if ((plci->fax_connect_info_buffer[pos] >= 3) && (plci->fax_connect_info_buffer[pos+1] >= 2)) plci->nsf_control_bits = GET_WORD(&plci->fax_connect_info_buffer[pos+2]); for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--) nlc[++len] = plci->fax_connect_info_buffer[pos++]; } else { if(api_parse(&b3_config->info[1], (word)b3_config->length, "wwsss", b3_config_parms)) { dbug(1,dprintf("non-standard facilities info missing or wrong format")); nlc[++len] = 0; } else { if ((b3_config_parms[4].length >= 3) && (b3_config_parms[4].info[1] >= 2)) plci->nsf_control_bits = GET_WORD(&b3_config_parms[4].info[2]); nlc[++len] = (byte)(b3_config_parms[4].length); for (i = 0; i < b3_config_parms[4].length; i++) nlc[++len] = b3_config_parms[4].info[1+i]; } } } nlc[0] = len; if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF) && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)) { ((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI_NEG; } } } PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits); len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH; for (i = 0; i < len; i++) plci->fax_connect_info_buffer[i] = nlc[1+i]; ((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0; i += ((T30_INFO *)&nlc[1])->head_line_len; while (i < nlc[0]) plci->fax_connect_info_buffer[len++] = nlc[++i]; plci->fax_connect_info_length = len; } else { nlc[0] = 14; if(b3_config->length!=16) return _B3_PARM_NOT_SUPPORTED; for(i=0; i<12; i++) nlc[1+i] = b3_config->info[1+i]; if(GET_WORD(&b3_config->info[13])!=8 && GET_WORD(&b3_config->info[13])!=128) return _B3_PARM_NOT_SUPPORTED; nlc[13] = b3_config->info[13]; if(GET_WORD(&b3_config->info[15])>=nlc[13]) return _B3_PARM_NOT_SUPPORTED; nlc[14] = b3_config->info[15]; } } else { if (plci->B3_prot == 4 || 
plci->B3_prot == 5 /*T.30 - FAX*/ ) return _B3_PARM_NOT_SUPPORTED; } add_p(plci, NLC, nlc); return 0; } /*----------------------------------------------------------------*/ /* make the same as add_b23, but only for the modem related */ /* L2 and L3 B-Chan protocol. */ /* */ /* Enabled L2 and L3 Configurations: */ /* If L1 == Modem all negotiation */ /* only L2 == Modem with full negotiation is allowed */ /* If L1 == Modem async or sync */ /* only L2 == Transparent is allowed */ /* L3 == Modem or L3 == Transparent are allowed */ /* B2 Configuration for modem: */ /* word : enable/disable compression, bitoptions */ /* B3 Configuration for modem: */ /* empty */ /*----------------------------------------------------------------*/ static word add_modem_b23 (PLCI * plci, API_PARSE* bp_parms) { static byte lli[12] = {1,1}; static byte llc[3] = {2,0,0}; static byte dlc[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; API_PARSE mdm_config[2]; word i; word b2_config = 0; for(i=0;i<2;i++) mdm_config[i].length = 0; for(i=0;i<sizeof(dlc);i++) dlc[i] = 0; if (((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE) && (GET_WORD(bp_parms[1].info) != B2_MODEM_EC_COMPRESSION)) || ((GET_WORD(bp_parms[0].info) != B1_MODEM_ALL_NEGOTIATE) && (GET_WORD(bp_parms[1].info) != B2_TRANSPARENT))) { return (_B_STACK_NOT_SUPPORTED); } if ((GET_WORD(bp_parms[2].info) != B3_MODEM) && (GET_WORD(bp_parms[2].info) != B3_TRANSPARENT)) { return (_B_STACK_NOT_SUPPORTED); } plci->B2_prot = (byte) GET_WORD(bp_parms[1].info); plci->B3_prot = (byte) GET_WORD(bp_parms[2].info); if ((GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) && bp_parms[4].length) { if (api_parse (&bp_parms[4].info[1], (word)bp_parms[4].length, "w", mdm_config)) { return (_WRONG_MESSAGE_FORMAT); } b2_config = GET_WORD(mdm_config[0].info); } /* OK, L2 is modem */ lli[0] = 1; lli[1] = 1; if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) lli[1] |= 2; if (plci->adapter->manufacturer_features & 
MANUFACTURER_FEATURE_OOB_CHANNEL) lli[1] |= 4; if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) { lli[1] |= 0x10; if (plci->rx_dma_descriptor <= 0) { plci->rx_dma_descriptor=diva_get_dma_descriptor(plci,&plci->rx_dma_magic); if (plci->rx_dma_descriptor >= 0) plci->rx_dma_descriptor++; } if (plci->rx_dma_descriptor > 0) { lli[1] |= 0x40; lli[0] = 6; lli[2] = (byte)(plci->rx_dma_descriptor - 1); lli[3] = (byte)plci->rx_dma_magic; lli[4] = (byte)(plci->rx_dma_magic >> 8); lli[5] = (byte)(plci->rx_dma_magic >> 16); lli[6] = (byte)(plci->rx_dma_magic >> 24); } } if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) { lli[1] |= 0x20; } llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? /*V42*/ 10 : /*V42_IN*/ 9; llc[2] = 4; /* pass L3 always transparent */ add_p(plci, LLI, lli); add_p(plci, LLC, llc); i = 1; PUT_WORD (&dlc[i], plci->appl->MaxDataLength); i += 2; if (GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) { if (bp_parms[4].length) { dbug(1, dprintf("MDM b2_config=%02x", b2_config)); dlc[i++] = 3; /* Addr A */ dlc[i++] = 1; /* Addr B */ dlc[i++] = 7; /* modulo mode */ dlc[i++] = 7; /* window size */ dlc[i++] = 0; /* XID len Lo */ dlc[i++] = 0; /* XID len Hi */ if (b2_config & MDM_B2_DISABLE_V42bis) { dlc[i] |= DLC_MODEMPROT_DISABLE_V42_V42BIS; } if (b2_config & MDM_B2_DISABLE_MNP) { dlc[i] |= DLC_MODEMPROT_DISABLE_MNP_MNP5; } if (b2_config & MDM_B2_DISABLE_TRANS) { dlc[i] |= DLC_MODEMPROT_REQUIRE_PROTOCOL; } if (b2_config & MDM_B2_DISABLE_V42) { dlc[i] |= DLC_MODEMPROT_DISABLE_V42_DETECT; } if (b2_config & MDM_B2_DISABLE_COMP) { dlc[i] |= DLC_MODEMPROT_DISABLE_COMPRESSION; } i++; } } else { dlc[i++] = 3; /* Addr A */ dlc[i++] = 1; /* Addr B */ dlc[i++] = 7; /* modulo mode */ dlc[i++] = 7; /* window size */ dlc[i++] = 0; /* XID len Lo */ dlc[i++] = 0; /* XID len Hi */ dlc[i++] = DLC_MODEMPROT_DISABLE_V42_V42BIS | DLC_MODEMPROT_DISABLE_MNP_MNP5 | DLC_MODEMPROT_DISABLE_V42_DETECT | DLC_MODEMPROT_DISABLE_COMPRESSION; 
	}
	dlc[0] = (byte)(i - 1);
	/* HexDump ("DLC", sizeof(dlc), &dlc[0]); */
	add_p(plci, DLC, dlc);
	return (0);
}

/*------------------------------------------------------------------*/
/* send a request for the signaling entity                          */
/*------------------------------------------------------------------*/

/* Queue a request for the signaling (D-channel) entity into the
 * PLCI's request ring buffer (RBuffer).  Record layout: 2-byte
 * length word, then the queued parameter bytes added by add_p(),
 * followed by a sig/nl flag byte, the request code and a channel
 * byte.  The record is handed to the adapter later by send_req().
 * NOTE(review): the byte written when no parameters were queued
 * presumably terminates an empty parameter list - confirm against
 * the firmware interface before changing. */
static void sig_req(PLCI *plci, byte req, byte Id)
{
	if(!plci) return;
	if(plci->adapter->adapter_disabled) return;
	dbug(1,dprintf("sig_req(%x)",req));
	/* remember the entity id being removed so late RCs can be matched */
	if (req == REMOVE)
		plci->sig_remove_id = plci->Sig.Id;
	if(plci->req_in==plci->req_in_start)
	{
		/* no parameters queued: reserve the length word and write
		   an empty parameter area */
		plci->req_in +=2;
		plci->RBuffer[plci->req_in++] = 0;
	}
	/* back-patch the record length (excludes the length word itself) */
	PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start-2);
	plci->RBuffer[plci->req_in++] = Id;   /* sig/nl flag */
	plci->RBuffer[plci->req_in++] = req;  /* request     */
	plci->RBuffer[plci->req_in++] = 0;    /* channel     */
	plci->req_in_start = plci->req_in;
}

/*------------------------------------------------------------------*/
/* send a request for the network layer entity                      */
/*------------------------------------------------------------------*/

/* Queue a request for the network layer (B-channel) entity.  Same
 * ring-buffer record format as sig_req(), but the sig/nl flag is 1
 * and the channel byte is the firmware channel mapped from the NCCI. */
static void nl_req_ncci(PLCI *plci, byte req, byte ncci)
{
	if(!plci) return;
	if(plci->adapter->adapter_disabled) return;
	dbug(1,dprintf("nl_req %02x %02x %02x", plci->Id, req, ncci));
	if (req == REMOVE)
	{
		/* removing the NL entity tears down all NCCIs of this PLCI */
		plci->nl_remove_id = plci->NL.Id;
		ncci_remove (plci, 0, (byte)(ncci != 0));
		ncci = 0;
	}
	if(plci->req_in==plci->req_in_start)
	{
		plci->req_in +=2;
		plci->RBuffer[plci->req_in++] = 0;
	}
	PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start-2);
	plci->RBuffer[plci->req_in++] = 1;                          /* sig/nl flag */
	plci->RBuffer[plci->req_in++] = req;                        /* request     */
	plci->RBuffer[plci->req_in++] = plci->adapter->ncci_ch[ncci]; /* channel   */
	plci->req_in_start = plci->req_in;
}

/* Pop the next queued record from the PLCI request ring buffer and
 * hand it to the adapter.  Only one signaling and one network layer
 * request may be outstanding at a time (sig_req / nl_req gate).
 * For the first request on an unassigned NL entity the queued
 * channel/flag bytes are overwritten in place with a CAI parameter
 * carrying the signaling entity id, binding NL to SIG. */
static void send_req(PLCI *plci)
{
	ENTITY * e;
	word l;
	/* word i; */
	if(!plci) return;
	if(plci->adapter->adapter_disabled) return;
	channel_xmit_xon (plci);
	/* if nothing to do, return */
	if(plci->req_in==plci->req_out) return;
	dbug(1,dprintf("send_req(in=%d,out=%d)",plci->req_in,plci->req_out));
	/* a request is still pending on either entity - wait for its RC */
	if(plci->nl_req || plci->sig_req) return;
	l = GET_WORD(&plci->RBuffer[plci->req_out]);
	plci->req_out += 2;
	plci->XData[0].P = &plci->RBuffer[plci->req_out];
	plci->req_out += l;
	if(plci->RBuffer[plci->req_out]==1)   /* sig/nl flag: network layer */
	{
		e = &plci->NL;
		plci->req_out++;
		e->Req = plci->nl_req = plci->RBuffer[plci->req_out++];
		e->ReqCh = plci->RBuffer[plci->req_out++];
		if(!(e->Id & 0x1f))
		{
			/* NL entity not assigned yet: rewrite the trailing bytes
			   into a CAI parameter holding the SIG entity id */
			e->Id = NL_ID;
			plci->RBuffer[plci->req_out-4] = CAI;
			plci->RBuffer[plci->req_out-3] = 1;
			plci->RBuffer[plci->req_out-2] = (plci->Sig.Id==0xff) ? 0 : plci->Sig.Id;
			plci->RBuffer[plci->req_out-1] = 0;
			l+=3;
			plci->nl_global_req = plci->nl_req;
		}
		dbug(1,dprintf("%x:NLREQ(%x:%x:%x)",plci->adapter->Id,e->Id,e->Req,e->ReqCh));
	}
	else                                  /* signaling entity */
	{
		e = &plci->Sig;
		if(plci->RBuffer[plci->req_out]) e->Id = plci->RBuffer[plci->req_out];
		plci->req_out++;
		e->Req = plci->sig_req = plci->RBuffer[plci->req_out++];
		e->ReqCh = plci->RBuffer[plci->req_out++];
		if(!(e->Id & 0x1f)) plci->sig_global_req = plci->sig_req;
		dbug(1,dprintf("%x:SIGREQ(%x:%x:%x)",plci->adapter->Id,e->Id,e->Req,e->ReqCh));
	}
	plci->XData[0].PLength = l;
	e->X = plci->XData;
	plci->adapter->request(e);
	dbug(1,dprintf("send_ok"));
}

/* Walk the PLCI's NCCI ring list round-robin and submit the next
 * pending DATA_B3 buffer (or a deferred N_DISC) to the network layer.
 * Stops as soon as one NL request is outstanding (plci->nl_req) or
 * the ring has been fully traversed.  V.120 protocols prepend a
 * one-byte header as a separate XData descriptor. */
static void send_data(PLCI *plci)
{
	DIVA_CAPI_ADAPTER * a;
	DATA_B3_DESC * data;
	NCCI *ncci_ptr;
	word ncci;
	if (!plci->nl_req && plci->ncci_ring_list)
	{
		a = plci->adapter;
		ncci = plci->ncci_ring_list;
		do
		{
			ncci = a->ncci_next[ncci];
			ncci_ptr = &(a->ncci[ncci]);
			/* skip channels that are flow-controlled by the firmware */
			if (!(a->ncci_ch[ncci]
			      && (a->ch_flow_control[a->ncci_ch[ncci]] & N_OK_FC_PENDING)))
			{
				if (ncci_ptr->data_pending)
				{
					if ((a->ncci_state[ncci] == CONNECTED)
					    || (a->ncci_state[ncci] == INC_ACT_PENDING)
					    || (plci->send_disc == ncci))
					{
						data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
						if ((plci->B2_prot == B2_V120_ASYNC)
						    || (plci->B2_prot == B2_V120_ASYNC_V42BIS)
						    || (plci->B2_prot == B2_V120_BIT_TRANSPARENT))
						{
							/* V.120: payload in descriptor 1, one-byte
							   V.120 header in descriptor 0 */
							plci->NData[1].P = TransmitBufferGet (plci->appl, data->P);
							plci->NData[1].PLength = data->Length;
							if (data->Flags & 0x10)
								plci->NData[0].P = v120_break_header;
							else
								plci->NData[0].P = v120_default_header;
							plci->NData[0].PLength = 1 ;
							plci->NL.XNum = 2;
							plci->NL.Req = plci->nl_req = (byte)((data->Flags&0x07)<<4 |N_DATA);
						}
						else
						{
							plci->NData[0].P = TransmitBufferGet (plci->appl, data->P);
							plci->NData[0].PLength = data->Length;
							if (data->Flags & 0x10)
								plci->NL.Req = plci->nl_req = (byte)N_UDATA;
							else if ((plci->B3_prot == B3_RTP) && (data->Flags & 0x01))
								plci->NL.Req = plci->nl_req = (byte)N_BDATA;
							else
								plci->NL.Req = plci->nl_req = (byte)((data->Flags&0x07)<<4 |N_DATA);
						}
						plci->NL.X = plci->NData;
						plci->NL.ReqCh = a->ncci_ch[ncci];
						dbug(1,dprintf("%x:DREQ(%x:%x)",a->Id,plci->NL.Id,plci->NL.Req));
						plci->data_sent = true;
						plci->data_sent_ptr = data->P;
						a->request(&plci->NL);
					}
					else {
						/* NCCI no longer in a sendable state */
						cleanup_ncci_data (plci, ncci);
					}
				}
				else if (plci->send_disc == ncci)
				{
					/* dprintf("N_DISC"); */
					/* all data flushed: issue the deferred disconnect */
					plci->NData[0].PLength = 0;
					plci->NL.ReqCh = a->ncci_ch[ncci];
					plci->NL.Req = plci->nl_req = N_DISC;
					a->request(&plci->NL);
					plci->command = _DISCONNECT_B3_R;
					plci->send_disc = 0;
				}
			}
		} while (!plci->nl_req && (ncci != plci->ncci_ring_list));
		/* remember where the scan stopped for round-robin fairness */
		plci->ncci_ring_list = ncci;
	}
}

/* Ensure the adapter always has enough listening PLCIs assigned:
 * one per configured listen slot plus one per call currently in the
 * "notified" state. */
static void listen_check(DIVA_CAPI_ADAPTER *a)
{
	word i,j;
	PLCI * plci;
	byte activnotifiedcalls = 0;
	dbug(1,dprintf("listen_check(%d,%d)",a->listen_active,a->max_listen));
	if (!remove_started && !a->adapter_disabled)
	{
		for(i=0;i<a->max_plci;i++)
		{
			plci = &(a->plci[i]);
			if(plci->notifiedcall) activnotifiedcalls++;
		}
		dbug(1,dprintf("listen_check(%d)",activnotifiedcalls));
		for(i=a->listen_active; i < ((word)(a->max_listen+activnotifiedcalls)); i++)
		{
			if((j=get_plci(a)))
			{
				a->listen_active++;
				plci = &a->plci[j-1];
				plci->State = LISTENING;
				add_p(plci,OAD,"\x01\xfd");
				add_p(plci,KEY,"\x04\x43\x41\x32\x30");
				add_p(plci,CAI,"\x01\xc0");
				add_p(plci,UID,"\x06\x43\x61\x70\x69\x32\x30");
				add_p(plci,LLI,"\x01\xc4");  /* support Dummy CR FAC + MWI + SpoofNotify */
				add_p(plci,SHIFT|6,NULL);
				add_p(plci,SIN,"\x02\x00\x00");
				plci->internal_command = LISTEN_SIG_ASSIGN_PEND; /* do indicate_req if OK */
				sig_req(plci,ASSIGN,DSIG_ID);
				send_req(plci);
			}
		}
	}
}

/*------------------------------------------------------------------*/
/* functions for all parameters sent in INDs                        */
/*------------------------------------------------------------------*/

/* Parse the Q.931-style information elements of a received signaling
 * indication (plci->Sig.RBuffer) into the parms[] pointer array.
 * parms_id[0] holds the count of IE codes listed in parms_id[1..];
 * each matched IE stores a pointer to its length byte.  Handles
 * codeset SHIFT (locking and non-locking) and ESC-extended codes.
 * For address IEs (OAD/CONN_NR/CAD) with an extended first octet the
 * IE is rewritten in place to split type-of-number from the digits.
 * multiIEsize != 0 selects "collect repeated IEs into consecutive
 * slots" mode instead of the one-slot-per-id mapping. */
static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
{
	word ploc;            /* points to current location within packet */
	byte w;
	byte wlen;
	byte codeset,lock;
	byte * in;
	word i;
	word code;
	word mIEindex = 0;
	ploc = 0;
	codeset = 0;
	lock = 0;
	in = plci->Sig.RBuffer->P;
	for(i=0; i<parms_id[0]; i++)   /* multiIE parms_id contains just the 1st */
	{                              /* element but parms array is larger      */
		parms[i] = (byte *)"";
	}
	for(i=0; i<multiIEsize; i++)
	{
		parms[i] = (byte *)"";
	}
	while(ploc<plci->Sig.RBuffer->length-1) {
		/* read information element id and length */
		w = in[ploc];
		if(w & 0x80) {
			/* w &=0xf0; removed, cannot detect congestion levels */
			/* upper 4 bit masked with w==SHIFT now */
			wlen = 0;   /* single-octet IE, no length byte */
		}
		else {
			wlen = (byte)(in[ploc+1]+1);
		}
		/* check if length valid (not exceeding end of packet) */
		/* NOTE(review): 270 is presumably the maximum signaling packet
		   size supported by the firmware - confirm before changing */
		if((ploc+wlen) > 270) return ;
		/* non-locking shift applies to one IE only */
		if(lock & 0x80) lock &=0x7f;
		else codeset = lock;
		if((w&0xf0)==SHIFT) {
			codeset = in[ploc];
			if(!(codeset & 0x08)) lock = (byte)(codeset & 7); /* locking shift */
			codeset &=7;
			lock |=0x80;
		}
		else {
			if(w==ESC && wlen>=3) code = in[ploc+2] |0x800;
			else code = w;
			code |= (codeset<<8);
			/* linear search of the requested IE id list */
			for(i=1; i<parms_id[0]+1 && parms_id[i]!=code; i++);
			if(i<parms_id[0]+1) {
				if(!multiIEsize) { /* with multiIEs use next field index,          */
					mIEindex = i-1;  /* with normal IEs use same index like parms_id */
				}
				parms[mIEindex] = &in[ploc+1];
				dbug(1,dprintf("mIE[%d]=0x%x",*parms[mIEindex],in[ploc]));
				if(parms_id[i]==OAD
				   || parms_id[i]==CONN_NR
				   || parms_id[i]==CAD) {
					if(in[ploc+2] &0x80) {
						/* split extended type octet in place */
						in[ploc+0] = (byte)(in[ploc+1]+1);
						in[ploc+1] = (byte)(in[ploc+2] &0x7f);
						in[ploc+2] = 0x80;
						parms[mIEindex] = &in[ploc];
					}
				}
				mIEindex++;       /* effects multiIEs only */
			}
		}
		ploc +=(wlen+1);
	}
	return ;
}

/*------------------------------------------------------------------*/
/* try to match a cip from received BC and HLC                      */
/*------------------------------------------------------------------*/

/* Compare two length-prefixed information elements byte by byte
 * (ie[0] is the length of the following data).  Returns true only
 * when ie1 is non-empty and both IEs are identical. */
static byte ie_compare(byte *ie1, byte *ie2)
{
	word i;
	if(!ie1 || ! ie2) return false;
	if(!ie1[0]) return false;
	for(i=0;i<(word)(ie1[0]+1);i++) if(ie1[i]!=ie2[i]) return false;
	return true;
}

/* Map received bearer capability (bc) and high layer compatibility
 * (hlc) IEs onto a CAPI CIP value.  CIPs 1..9 are matched by BC
 * alone (searched downwards), CIPs 16..28 require BC and HLC both
 * to match; an HLC match takes precedence. */
static word find_cip(DIVA_CAPI_ADAPTER *a, byte *bc, byte *hlc)
{
	word i;
	word j;
	for(i=9;i && !ie_compare(bc,cip_bc[i][a->u_law]);i--);
	for(j=16;j<29 && (!ie_compare(bc,cip_bc[j][a->u_law]) || !ie_compare(hlc,cip_hlc[j])); j++);
	if(j==29) return i;
	return j;
}

/* Assemble the CAPI AdditionalInfo structure: add_i[0] (B-channel
 * info) is set from the escaped channel id, add_i[3] is built by
 * concatenating all received FTY (facility) IEs into the caller's
 * facility buffer, each prefixed with the 0x1c facility IE id.
 * Returns the total length of all four AdditionalInfo parts plus
 * their four length bytes. */
static byte AddInfo(byte **add_i, byte **fty_i, byte *esc_chi, byte *facility)
{
	byte i;
	byte j;
	byte k;
	byte flen;
	byte len=0;
	/* facility is a nested structure */
	/* FTY can be more than once      */
	if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
	{
		add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
	}
	else
	{
		add_i[0] = (byte *)"";
	}
	if(!fty_i[0][0])
	{
		add_i[3] = (byte *)"";
	}
	else
	{ /* facility array found */
		for(i=0,j=1;i<MAX_MULTI_IE && fty_i[i][0];i++)
		{
			dbug(1,dprintf("AddIFac[%d]",fty_i[i][0]));
			len += fty_i[i][0];
			len += 2;
			flen=fty_i[i][0];
			facility[j++]=0x1c; /* copy fac IE */
			for(k=0;k<=flen;k++,j++)
			{
				facility[j]=fty_i[i][k];
				/* dbug(1,dprintf("%x ",facility[j])); */
			}
		}
		facility[0] = len;
		add_i[3] = facility;
	}
	/* dbug(1,dprintf("FacArrLen=%d ",len)); */
	len = add_i[0][0]+add_i[1][0]+add_i[2][0]+add_i[3][0];
	len += 4;                          /* calculate length of all */
	return(len);
}

/*------------------------------------------------------------------*/
/* voice and codec features                                         */
/*------------------------------------------------------------------*/

/* Switch the on-board codec / external equipment onto the given
 * B-channel (TEL_CTRL with FTY "B On" plus an ESC channel id). */
static void SetVoiceChannel(PLCI *plci, byte *chi, DIVA_CAPI_ADAPTER *a)
{
	byte voice_chi[] = "\x02\x18\x01";
	byte channel;
	channel = chi[chi[0]]&0x3;
	dbug(1,dprintf("ExtDevON(Ch=0x%x)",channel));
	voice_chi[2] = (channel) ?
	channel : 1;  /* default to channel 1 if none signalled */
	add_p(plci,FTY,"\x02\x01\x07");             /* B On, default on 1 */
	add_p(plci,ESC,voice_chi);                  /* Channel */
	sig_req(plci,TEL_CTRL,0);
	send_req(plci);
	if(a->AdvSignalPLCI)
	{
		adv_voice_write_coefs (a->AdvSignalPLCI, ADV_VOICE_WRITE_ACTIVATION);
	}
}

/* Detach the codec from the B-channel (TEL_CTRL with FTY "B Off")
 * and clear any advanced-voice coefficient configuration. */
static void VoiceChannelOff(PLCI *plci)
{
	dbug(1,dprintf("ExtDevOFF"));
	add_p(plci,FTY,"\x02\x01\x08");             /* B Off */
	sig_req(plci,TEL_CTRL,0);
	send_req(plci);
	if(plci->adapter->AdvSignalPLCI)
	{
		adv_voice_clear_config (plci->adapter->AdvSignalPLCI);
	}
}

/* Acquire the adapter's codec for an application.
 * - HANDSET capable hardware: allocate (or reuse) the internal
 *   "advanced codec" PLCI that tracks hook states; only one
 *   application may own it at a time.
 * - ON_BOARD_CODEC only: no hook support; on first use an ASSIGN
 *   with CAI 0x80 is issued to shut down the card's internal
 *   handset application.
 * Returns 0 on success, 0x2001 (codec busy / no PLCI available) or
 * 0x300B (facility not supported). */
static word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, byte hook_listen)
{
	word j;
	PLCI *splci;
	/* check if hardware supports handset with hook states (adv.codec) */
	/* or if just a on board codec is supported                        */
	/* the advanced codec plci is just for internal use                */
	/* diva Pro with on-board codec: */
	if(a->profile.Global_Options & HANDSET)
	{
		/* new call, but hook states are already signalled */
		if(a->AdvCodecFLAG)
		{
			if(a->AdvSignalAppl!=appl || a->AdvSignalPLCI)
			{
				dbug(1,dprintf("AdvSigPlci=0x%x",a->AdvSignalPLCI));
				return 0x2001; /* codec in use by another application */
			}
			if(plci!=NULL)
			{
				a->AdvSignalPLCI = plci;
				plci->tel=ADV_VOICE;
			}
			return 0;                      /* adv codec still used */
		}
		if((j=get_plci(a)))
		{
			splci = &a->plci[j-1];
			splci->tel = CODEC_PERMANENT;
			/* hook_listen indicates if a facility_req with handset/hook support */
			/* was sent. Otherwise if just a call on an external device was made */
			/* the codec will be used but the hook info will be discarded (just  */
			/* the external controller is in use                                 */
			if(hook_listen) splci->State = ADVANCED_VOICE_SIG;
			else
			{
				splci->State = ADVANCED_VOICE_NOSIG;
				if(plci)
				{
					plci->spoofed_msg = SPOOFING_REQUIRED;
				}
				/* indicate D-ch connect if */
			}                                        /* codec is connected OK */
			if(plci!=NULL)
			{
				a->AdvSignalPLCI = plci;
				plci->tel=ADV_VOICE;
			}
			a->AdvSignalAppl = appl;
			a->AdvCodecFLAG = true;
			a->AdvCodecPLCI = splci;
			add_p(splci,CAI,"\x01\x15");
			add_p(splci,LLI,"\x01\x00");
			add_p(splci,ESC,"\x02\x18\x00");
			add_p(splci,UID,"\x06\x43\x61\x70\x69\x32\x30");
			splci->internal_command = PERM_COD_ASSIGN;
			dbug(1,dprintf("Codec Assign"));
			sig_req(splci,ASSIGN,DSIG_ID);
			send_req(splci);
		}
		else
		{
			return 0x2001; /* wrong state, no more plcis */
		}
	}
	else if(a->profile.Global_Options & ON_BOARD_CODEC)
	{
		if(hook_listen) return 0x300B;               /* Facility not supported */
		/* no hook with SCOM */
		if(plci!=NULL) plci->tel = CODEC;
		dbug(1,dprintf("S/SCOM codec"));
		/* first time we use the scom-s codec we must shut down the internal   */
		/* handset application of the card. This can be done by an assign with */
		/* a cai with the 0x80 bit set. Assign return code is 'out of resource'*/
		if(!a->scom_appl_disable){
			if((j=get_plci(a)))
			{
				splci = &a->plci[j-1];
				add_p(splci,CAI,"\x01\x80");
				add_p(splci,UID,"\x06\x43\x61\x70\x69\x32\x30");
				sig_req(splci,ASSIGN,0xC0);  /* 0xc0 is the TEL_ID */
				send_req(splci);
				a->scom_appl_disable = true;
			}
			else{
				return 0x2001; /* wrong state, no more plcis */
			}
		}
	}
	else return 0x300B;                            /* Facility not supported */
	return 0;
}

/* Release codec ownership when the owning PLCI goes away: switch
 * the voice channel off and, if the advanced-codec PLCI was only a
 * temporary (no-signaling) helper, remove it and clear all adapter
 * level codec bookkeeping. */
static void CodecIdCheck(DIVA_CAPI_ADAPTER *a, PLCI *plci)
{
	dbug(1,dprintf("CodecIdCheck"));
	if(a->AdvSignalPLCI == plci)
	{
		dbug(1,dprintf("PLCI owns codec"));
		VoiceChannelOff(a->AdvCodecPLCI);
		if(a->AdvCodecPLCI->State == ADVANCED_VOICE_NOSIG)
		{
			dbug(1,dprintf("remove temp codec PLCI"));
			plci_remove(a->AdvCodecPLCI);
			a->AdvCodecFLAG  = 0;
			a->AdvCodecPLCI  = NULL;
			a->AdvSignalAppl = NULL;
		}
		a->AdvSignalPLCI = NULL;
	}
}

/* -------------------------------------------------------------------
   Ask for physical address of card on PCI bus
   ------------------------------------------------------------------- */

/* Query the XDI layer (via a synchronous IDI request) for the
 * adapter's SDRAM BAR; stored in a->sdram_bar, 0 if unsupported. */
static void diva_ask_for_xdi_sdram_bar (DIVA_CAPI_ADAPTER * a,
					IDI_SYNC_REQ * preq)
{
	a->sdram_bar = 0;
	if (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR)
	{
		ENTITY * e = (ENTITY *)preq;
		e->user[0] = a->Id - 1;
		preq->xdi_sdram_bar.info.bar = 0;
		preq->xdi_sdram_bar.Req = 0;
		preq->xdi_sdram_bar.Rc = IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR;
		(*(a->request))(e);
		a->sdram_bar = preq->xdi_sdram_bar.info.bar;
		dbug(3,dprintf("A(%d) SDRAM BAR = %08x", a->Id, a->sdram_bar));
	}
}

/* -------------------------------------------------------------------
   Ask XDI about extended features
   ------------------------------------------------------------------- */

/* Query the XDI layer once (guarded by diva_xdi_extended_features)
 * for its extended feature set (CMA, RX DMA, SDRAM BAR, NO_CANCEL_RC)
 * and cache the result in the global diva_xdi_extended_features.
 * The local buffer must be large enough for both the feature request
 * and a plain ENTITY (the larger of the two). */
static void diva_get_extended_adapter_features (DIVA_CAPI_ADAPTER  * a)
{
	IDI_SYNC_REQ * preq;
	char buffer[ ((sizeof(preq->xdi_extended_features)+4) > sizeof(ENTITY)) ?
(sizeof(preq->xdi_extended_features)+4) : sizeof(ENTITY)];
	char features[4];
	preq = (IDI_SYNC_REQ *)&buffer[0];
	if (!diva_xdi_extended_features)
	{
		ENTITY * e = (ENTITY *)preq;
		/* mark "already queried" even if the query reports nothing */
		diva_xdi_extended_features |= 0x80000000;
		e->user[0] = a->Id - 1;
		preq->xdi_extended_features.Req = 0;
		preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
		preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
		preq->xdi_extended_features.info.features = &features[0];
		(*(a->request))(e);
		if (features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID)
		{
			/* Check features located in the byte '0' */
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_CMA)
			{
				diva_xdi_extended_features |= DIVA_CAPI_USE_CMA;
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA)
			{
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_RX_DMA;
				dbug(1,dprintf("XDI provides RxDMA"));
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR)
			{
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR;
			}
			if (features[0] & DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC)
			{
				diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_NO_CANCEL;
				dbug(3,dprintf("XDI provides NO_CANCEL_RC feature"));
			}
		}
	}
	diva_ask_for_xdi_sdram_bar (a, preq);
}

/*------------------------------------------------------------------*/
/* automatic law                                                    */
/*------------------------------------------------------------------*/
/* called from OS specific part after init time to get the Law              */
/* a-law (Euro) and u-law (us,japan) use different BCs in the Setup message */

/* Assign a helper PLCI (USELAW_REQ) whose response tells whether the
 * adapter runs a-law or u-law; also triggers the one-time extended
 * feature query.  Idempotent via a->automatic_law. */
void AutomaticLaw(DIVA_CAPI_ADAPTER *a)
{
	word j;
	PLCI *splci;
	if(a->automatic_law) {
		return;
	}
	if((j=get_plci(a))) {
		diva_get_extended_adapter_features (a);
		splci = &a->plci[j-1];
		a->automatic_lawPLCI = splci;
		a->automatic_law = 1;
		add_p(splci,CAI,"\x01\x80");
		add_p(splci,UID,"\x06\x43\x61\x70\x69\x32\x30");
		splci->internal_command = USELAW_REQ;
		splci->command = 0;
		splci->number = 0;
		sig_req(splci,ASSIGN,DSIG_ID);
		send_req(splci);
	}
}

/* called from OS specific part if an application sends an Capi20Release */

/* Tear down everything the releasing application owns on every
 * adapter: clear its listen/CIP/notification masks, detach it from
 * every PLCI (hanging up incoming calls it was the last listener
 * for), re-balance listeners, optionally force layer 1 down when the
 * last application releases, and free the advanced codec if this
 * application held it. */
word CapiRelease(word Id)
{
	word i, j, appls_found;
	PLCI *plci;
	APPL *this;
	DIVA_CAPI_ADAPTER *a;
	if (!Id)
	{
		dbug(0,dprintf("A: CapiRelease(Id==0)"));
		return (_WRONG_APPL_ID);
	}
	this = &application[Id-1];   /* get application pointer */
	for(i=0,appls_found=0; i<max_appl; i++)
	{
		if(application[i].Id)      /* an application has been found */
		{
			appls_found++;
		}
	}
	for(i=0; i<max_adapter; i++)   /* scan all adapters... */
	{
		a = &adapter[i];
		if (a->request)
		{
			a->Info_Mask[Id-1] = 0;
			a->CIP_Mask[Id-1] = 0;
			a->Notification_Mask[Id-1] = 0;
			a->codec_listen[Id-1] = NULL;
			a->requested_options_table[Id-1] = 0;
			for(j=0; j<a->max_plci; j++)   /* and all PLCIs connected */
			{                              /* with this application   */
				plci = &a->plci[j];
				if(plci->Id)                 /* if plci owns no application */
				{                            /* it may be not jet connected */
					if(plci->State==INC_CON_PENDING || plci->State==INC_CON_ALERT)
					{
						if(test_c_ind_mask_bit (plci, (word)(Id-1)))
						{
							clear_c_ind_mask_bit (plci, (word)(Id-1));
							if(c_ind_mask_empty (plci))
							{
								/* last candidate application gone: hang up */
								sig_req(plci,HANGUP,0);
								send_req(plci);
								plci->State = OUTG_DIS_PENDING;
							}
						}
					}
					if(test_c_ind_mask_bit (plci, (word)(Id-1)))
					{
						clear_c_ind_mask_bit (plci, (word)(Id-1));
						if(c_ind_mask_empty (plci))
						{
							if(!plci->appl)
							{
								plci_remove(plci);
								plci->State = IDLE;
							}
						}
					}
					if(plci->appl==this)
					{
						plci->appl = NULL;
						plci_remove(plci);
						plci->State = IDLE;
					}
				}
			}
			listen_check(a);
			if(a->flag_dynamic_l1_down)
			{
				if(appls_found==1)            /* last application does a capi release */
				{
					if((j=get_plci(a)))
					{
						plci = &a->plci[j-1];
						plci->command = 0;
						add_p(plci,OAD,"\x01\xfd");
						add_p(plci,CAI,"\x01\x80");
						add_p(plci,UID,"\x06\x43\x61\x70\x69\x32\x30");
						add_p(plci,SHIFT|6,NULL);
						add_p(plci,SIN,"\x02\x00\x00");
						plci->internal_command = REM_L1_SIG_ASSIGN_PEND;
						sig_req(plci,ASSIGN,DSIG_ID);
						add_p(plci,FTY,"\x02\xff\x06");   /* l1 down */
						sig_req(plci,SIG_CTRL,0);
						send_req(plci);
					}
				}
			}
			if(a->AdvSignalAppl==this)
			{
				this->NullCREnable = false;
				if (a->AdvCodecPLCI)
				{
					plci_remove(a->AdvCodecPLCI);
					a->AdvCodecPLCI->tel = 0;
					a->AdvCodecPLCI->adv_nl = 0;
				}
				a->AdvSignalAppl = NULL;
				a->AdvSignalPLCI = NULL;
				a->AdvCodecFLAG = 0;
				a->AdvCodecPLCI = NULL;
			}
		}
	}
	this->Id = 0;
	return GOOD;
}

/* Complete the removal of a PLCI once both its NL and SIG entities
 * are gone and no incoming-call candidates remain.  Returns true
 * when the PLCI is fully idle (or was NULL), false while entity
 * removal is still pending. */
static word plci_remove_check(PLCI *plci)
{
	if(!plci) return true;
	if(!plci->NL.Id && c_ind_mask_empty (plci))
	{
		if(plci->Sig.Id == 0xff)
			plci->Sig.Id = 0;
		if(!plci->Sig.Id)
		{
			dbug(1,dprintf("plci_remove_complete(%x)",plci->Id));
			dbug(1,dprintf("tel=0x%x,Sig=0x%x",plci->tel,plci->Sig.Id));
			if (plci->Id)
			{
				CodecIdCheck(plci->adapter, plci);
				clear_b1_config (plci);
				ncci_remove (plci, 0, false);
				plci_free_msg_in_queue (plci);
				channel_flow_control_remove (plci);
				plci->Id = 0;
				plci->State = IDLE;
				plci->channels = 0;
				plci->appl = NULL;
				plci->notifiedcall = 0;
			}
			listen_check(plci->adapter);
			return true;
		}
	}
	return false;
}

/*------------------------------------------------------------------*/

/* True while the PLCI's network layer cannot accept another request:
 * either an NL request is already outstanding or the firmware has
 * flow-controlled the channel of the current ring-list NCCI. */
static byte plci_nl_busy (PLCI *plci)
{
	/* only applicable for non-multiplexed protocols */
	return (plci->nl_req
		|| (plci->ncci_ring_list
		 && plci->adapter->ncci_ch[plci->ncci_ring_list]
		 && (plci->adapter->ch_flow_control[plci->adapter->ncci_ch[plci->ncci_ring_list]] & N_OK_FC_PENDING)));
}

/*------------------------------------------------------------------*/
/* DTMF facilities                                                  */
/*------------------------------------------------------------------*/

/* Mapping between CAPI DTMF characters and firmware tone codes.
 * send_mask / listen_mask select in which modes (0x01 = DTMF digits,
 * 0x02 = MF digits, 0x04 = tones) the entry is valid for sending /
 * detection. */
static struct
{
	byte send_mask;
	byte listen_mask;
	byte character;
	byte code;
} dtmf_digit_map[] =
{
	{ 0x01, 0x01, 0x23, DTMF_DIGIT_TONE_CODE_HASHMARK },
	{ 0x01, 0x01, 0x2a, DTMF_DIGIT_TONE_CODE_STAR },
	{ 0x01, 0x01, 0x30, DTMF_DIGIT_TONE_CODE_0 },
	{ 0x01, 0x01, 0x31, DTMF_DIGIT_TONE_CODE_1 },
	{ 0x01, 0x01, 0x32, DTMF_DIGIT_TONE_CODE_2 },
	{ 0x01, 0x01, 0x33, DTMF_DIGIT_TONE_CODE_3 },
	{ 0x01, 0x01, 0x34, DTMF_DIGIT_TONE_CODE_4 },
	{ 0x01, 0x01, 0x35, DTMF_DIGIT_TONE_CODE_5 },
	{ 0x01, 0x01, 0x36, DTMF_DIGIT_TONE_CODE_6 },
	{ 0x01, 0x01, 0x37, DTMF_DIGIT_TONE_CODE_7 },
	{ 0x01, 0x01, 0x38, DTMF_DIGIT_TONE_CODE_8 },
	{ 0x01, 0x01, 0x39, DTMF_DIGIT_TONE_CODE_9 },
	{ 0x01, 0x01, 0x41,
DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x01, 0x42, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x01, 0x43, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x01, 0x44, DTMF_DIGIT_TONE_CODE_D },
	/* lowercase a-d are accepted for sending only (listen_mask 0) */
	{ 0x01, 0x00, 0x61, DTMF_DIGIT_TONE_CODE_A },
	{ 0x01, 0x00, 0x62, DTMF_DIGIT_TONE_CODE_B },
	{ 0x01, 0x00, 0x63, DTMF_DIGIT_TONE_CODE_C },
	{ 0x01, 0x00, 0x64, DTMF_DIGIT_TONE_CODE_D },
	{ 0x04, 0x04, 0x80, DTMF_SIGNAL_NO_TONE },
	{ 0x00, 0x04, 0x81, DTMF_SIGNAL_UNIDENTIFIED_TONE },
	{ 0x04, 0x04, 0x82, DTMF_SIGNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x83, DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE },
	{ 0x04, 0x04, 0x84, DTMF_SIGNAL_SPECIAL_DIAL_TONE },
	{ 0x04, 0x04, 0x85, DTMF_SIGNAL_SECOND_DIAL_TONE },
	{ 0x04, 0x04, 0x86, DTMF_SIGNAL_RINGING_TONE },
	{ 0x04, 0x04, 0x87, DTMF_SIGNAL_SPECIAL_RINGING_TONE },
	{ 0x04, 0x04, 0x88, DTMF_SIGNAL_BUSY_TONE },
	{ 0x04, 0x04, 0x89, DTMF_SIGNAL_CONGESTION_TONE },
	{ 0x04, 0x04, 0x8a, DTMF_SIGNAL_SPECIAL_INFORMATION_TONE },
	{ 0x04, 0x04, 0x8b, DTMF_SIGNAL_COMFORT_TONE },
	{ 0x04, 0x04, 0x8c, DTMF_SIGNAL_HOLD_TONE },
	{ 0x04, 0x04, 0x8d, DTMF_SIGNAL_RECORD_TONE },
	{ 0x04, 0x04, 0x8e, DTMF_SIGNAL_CALLER_WAITING_TONE },
	{ 0x04, 0x04, 0x8f, DTMF_SIGNAL_CALL_WAITING_TONE },
	{ 0x04, 0x04, 0x90, DTMF_SIGNAL_PAY_TONE },
	{ 0x04, 0x04, 0x91, DTMF_SIGNAL_POSITIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x92, DTMF_SIGNAL_NEGATIVE_INDICATION_TONE },
	{ 0x04, 0x04, 0x93, DTMF_SIGNAL_WARNING_TONE },
	{ 0x04, 0x04, 0x94, DTMF_SIGNAL_INTRUSION_TONE },
	{ 0x04, 0x04, 0x95, DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE },
	{ 0x04, 0x04, 0x96, DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE },
	{ 0x04, 0x04, 0x97, DTMF_SIGNAL_CPE_ALERTING_SIGNAL },
	{ 0x04, 0x04, 0x98, DTMF_SIGNAL_OFF_HOOK_WARNING_TONE },
	{ 0x04, 0x04, 0xbf, DTMF_SIGNAL_INTERCEPT_TONE },
	{ 0x04, 0x04, 0xc0, DTMF_SIGNAL_MODEM_CALLING_TONE },
	{ 0x04, 0x04, 0xc1, DTMF_SIGNAL_FAX_CALLING_TONE },
	{ 0x04, 0x04, 0xc2, DTMF_SIGNAL_ANSWER_TONE },
	{ 0x04, 0x04, 0xc3, DTMF_SIGNAL_REVERSED_ANSWER_TONE },
	{ 0x04, 0x04, 0xc4, DTMF_SIGNAL_ANSAM_TONE },
	{ 0x04, 0x04, 0xc5, DTMF_SIGNAL_REVERSED_ANSAM_TONE },
	{ 0x04, 0x04, 0xc6, DTMF_SIGNAL_BELL103_ANSWER_TONE },
	{ 0x04, 0x04, 0xc7, DTMF_SIGNAL_FAX_FLAGS },
	{ 0x04, 0x04, 0xc8, DTMF_SIGNAL_G2_FAX_GROUP_ID },
	{ 0x00, 0x04, 0xc9, DTMF_SIGNAL_HUMAN_SPEECH },
	{ 0x04, 0x04, 0xca, DTMF_SIGNAL_ANSWERING_MACHINE_390 },
	{ 0x02, 0x02, 0xf1, DTMF_MF_DIGIT_TONE_CODE_1 },
	{ 0x02, 0x02, 0xf2, DTMF_MF_DIGIT_TONE_CODE_2 },
	{ 0x02, 0x02, 0xf3, DTMF_MF_DIGIT_TONE_CODE_3 },
	{ 0x02, 0x02, 0xf4, DTMF_MF_DIGIT_TONE_CODE_4 },
	{ 0x02, 0x02, 0xf5, DTMF_MF_DIGIT_TONE_CODE_5 },
	{ 0x02, 0x02, 0xf6, DTMF_MF_DIGIT_TONE_CODE_6 },
	{ 0x02, 0x02, 0xf7, DTMF_MF_DIGIT_TONE_CODE_7 },
	{ 0x02, 0x02, 0xf8, DTMF_MF_DIGIT_TONE_CODE_8 },
	{ 0x02, 0x02, 0xf9, DTMF_MF_DIGIT_TONE_CODE_9 },
	{ 0x02, 0x02, 0xfa, DTMF_MF_DIGIT_TONE_CODE_0 },
	{ 0x02, 0x02, 0xfb, DTMF_MF_DIGIT_TONE_CODE_K1 },
	{ 0x02, 0x02, 0xfc, DTMF_MF_DIGIT_TONE_CODE_K2 },
	{ 0x02, 0x02, 0xfd, DTMF_MF_DIGIT_TONE_CODE_KP },
	{ 0x02, 0x02, 0xfe, DTMF_MF_DIGIT_TONE_CODE_S1 },
	{ 0x02, 0x02, 0xff, DTMF_MF_DIGIT_TONE_CODE_ST },
};

#define DTMF_DIGIT_MAP_ENTRIES ARRAY_SIZE(dtmf_digit_map)

/* Send an N_UDATA request enabling (or disabling, enable_mask == 0)
 * the firmware DTMF receiver.  Digit/gap durations default to 40 ms
 * if not configured on the PLCI.  The software detector
 * (capidtmf_recv_*) is kept in sync with the firmware state. */
static void dtmf_enable_receiver (PLCI *plci, byte enable_mask)
{
	word min_digit_duration, min_gap_duration;
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_enable_receiver %02x",
		(dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
		(char *)(FILE_), __LINE__, enable_mask));
	if (enable_mask != 0)
	{
		min_digit_duration = (plci->dtmf_rec_pulse_ms == 0) ? 40 : plci->dtmf_rec_pulse_ms;
		min_gap_duration = (plci->dtmf_rec_pause_ms == 0) ? 40 : plci->dtmf_rec_pause_ms;
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_ENABLE_RECEIVER;
		PUT_WORD (&plci->internal_req_buffer[1], min_digit_duration);
		PUT_WORD (&plci->internal_req_buffer[3], min_gap_duration);
		plci->NData[0].PLength = 5;
		PUT_WORD (&plci->internal_req_buffer[5], INTERNAL_IND_BUFFER_SIZE);
		plci->NData[0].PLength += 2;
		capidtmf_recv_enable (&(plci->capidtmf_state), min_digit_duration, min_gap_duration);
	}
	else
	{
		plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_DISABLE_RECEIVER;
		plci->NData[0].PLength = 1;
		capidtmf_recv_disable (&(plci->capidtmf_state));
	}
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request (&plci->NL);
}

/* Send an N_UDATA request asking the firmware to transmit a string
 * of DTMF/MF digits, translated through dtmf_digit_map (unknown
 * characters fall back to '*').  Pulse/pause default to 40 ms. */
static void dtmf_send_digits (PLCI *plci, byte *digit_buffer, word digit_count)
{
	word w, i;
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_send_digits %d",
		(dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
		(char *)(FILE_), __LINE__, digit_count));
	plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_SEND_DIGITS;
	w = (plci->dtmf_send_pulse_ms == 0) ? 40 : plci->dtmf_send_pulse_ms;
	PUT_WORD (&plci->internal_req_buffer[1], w);
	w = (plci->dtmf_send_pause_ms == 0) ? 40 : plci->dtmf_send_pause_ms;
	PUT_WORD (&plci->internal_req_buffer[3], w);
	for (i = 0; i < digit_count; i++)
	{
		w = 0;
		while ((w < DTMF_DIGIT_MAP_ENTRIES)
		  && (digit_buffer[i] != dtmf_digit_map[w].character))
		{
			w++;
		}
		plci->internal_req_buffer[5+i] = (w < DTMF_DIGIT_MAP_ENTRIES) ?
			dtmf_digit_map[w].code : DTMF_DIGIT_TONE_CODE_STAR;
	}
	plci->NData[0].PLength = 5 + digit_count;
	plci->NData[0].P = plci->internal_req_buffer;
	plci->NL.X = plci->NData;
	plci->NL.ReqCh = 0;
	plci->NL.Req = plci->nl_req = (byte) N_UDATA;
	plci->adapter->request (&plci->NL);
}

/* Reset the DTMF receiver bookkeeping and re-init the software
 * detector for the adapter's companding law. */
static void dtmf_rec_clear_config (PLCI *plci)
{
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_rec_clear_config",
		(dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
		(char *)(FILE_), __LINE__));
	plci->dtmf_rec_active = 0;
	plci->dtmf_rec_pulse_ms = 0;
	plci->dtmf_rec_pause_ms = 0;
	capidtmf_init (&(plci->capidtmf_state), plci->adapter->u_law);
}

/* Reset the DTMF sender bookkeeping. */
static void dtmf_send_clear_config (PLCI *plci)
{
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_send_clear_config",
		(dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
		(char *)(FILE_), __LINE__));
	plci->dtmf_send_requests = 0;
	plci->dtmf_send_pulse_ms = 0;
	plci->dtmf_send_pause_ms = 0;
}

/* Before a B-channel resource switch: flush (confirm) all pending
 * DTMF send requests so none is lost across the switch. */
static void dtmf_prepare_switch (dword Id, PLCI *plci)
{
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_prepare_switch",
		UnMapId (Id), (char *)(FILE_), __LINE__));
	while (plci->dtmf_send_requests != 0)
		dtmf_confirmation (Id, plci);
}

/* Save step of the adjust-b state machine for DTMF; nothing needs
 * to be stored, so this only traces and succeeds. */
static word dtmf_save_config (dword Id, PLCI *plci, byte Rc)
{
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_save_config %02x %d",
		UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	return (GOOD);
}

/* Restore step of the adjust-b state machine: if a DTMF receiver was
 * active before the resource switch, re-enable it (two sub-states:
 * issue the request, then check its return code). */
static word dtmf_restore_config (dword Id, PLCI *plci, byte Rc)
{
	word Info;
	dbug (1, dprintf ("[%06lx] %s,%d: dtmf_restore_config %02x %d",
		UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
	Info = GOOD;
	if (plci->B1_facilities & B1_FACILITY_DTMFR)
	{
		switch (plci->adjust_b_state)
		{
		case ADJUST_B_RESTORE_DTMF_1:
			plci->internal_command = plci->adjust_b_command;
			if (plci_nl_busy (plci))
			{
				/* NL busy: stay in this state and retry later */
				plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
				break;
			}
			dtmf_enable_receiver (plci, plci->dtmf_rec_active);
			plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_2;
			break;
		case ADJUST_B_RESTORE_DTMF_2:
			if ((Rc != OK) && (Rc != OK_FC))
			{
				dbug (1,
dprintf ("[%06lx] %s,%d: Reenable DTMF receiver failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); Info = _WRONG_STATE; break; } break; } } return (Info); } static void dtmf_command (dword Id, PLCI *plci, byte Rc) { word internal_command, Info; byte mask; byte result[4]; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_command %02x %04x %04x %d %d %d %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command, plci->dtmf_cmd, plci->dtmf_rec_pulse_ms, plci->dtmf_rec_pause_ms, plci->dtmf_send_pulse_ms, plci->dtmf_send_pause_ms)); Info = GOOD; result[0] = 2; PUT_WORD (&result[1], DTMF_SUCCESS); internal_command = plci->internal_command; plci->internal_command = 0; mask = 0x01; switch (plci->dtmf_cmd) { case DTMF_LISTEN_TONE_START: mask <<= 1; case DTMF_LISTEN_MF_START: mask <<= 1; case DTMF_LISTEN_START: switch (internal_command) { default: adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities | B1_FACILITY_DTMFR), DTMF_COMMAND_1); case DTMF_COMMAND_1: if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Load DTMF failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; case DTMF_COMMAND_2: if (plci_nl_busy (plci)) { plci->internal_command = DTMF_COMMAND_2; return; } plci->internal_command = DTMF_COMMAND_3; dtmf_enable_receiver (plci, (byte)(plci->dtmf_rec_active | mask)); return; case DTMF_COMMAND_3: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Enable DTMF receiver failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); Info = _FACILITY_NOT_SUPPORTED; break; } plci->tone_last_indication_code = DTMF_SIGNAL_NO_TONE; plci->dtmf_rec_active |= mask; break; } break; case DTMF_LISTEN_TONE_STOP: mask <<= 1; case DTMF_LISTEN_MF_STOP: mask <<= 1; case DTMF_LISTEN_STOP: switch (internal_command) { default: plci->dtmf_rec_active &= ~mask; if (plci->dtmf_rec_active) break; /* case DTMF_COMMAND_1: if (plci->dtmf_rec_active) { if 
(plci_nl_busy (plci)) { plci->internal_command = DTMF_COMMAND_1; return; } plci->dtmf_rec_active &= ~mask; plci->internal_command = DTMF_COMMAND_2; dtmf_enable_receiver (plci, false); return; } Rc = OK; case DTMF_COMMAND_2: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Disable DTMF receiver failed %02x", UnMapId (Id), (char far *)(FILE_), __LINE__, Rc)); Info = _FACILITY_NOT_SUPPORTED; break; } */ adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities & ~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3); case DTMF_COMMAND_3: if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Unload DTMF failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; break; } break; case DTMF_SEND_TONE: mask <<= 1; case DTMF_SEND_MF: mask <<= 1; case DTMF_DIGITS_SEND: switch (internal_command) { default: adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities | ((plci->dtmf_parameter_length != 0) ? 
B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)), DTMF_COMMAND_1); case DTMF_COMMAND_1: if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Load DTMF failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; case DTMF_COMMAND_2: if (plci_nl_busy (plci)) { plci->internal_command = DTMF_COMMAND_2; return; } plci->dtmf_msg_number_queue[(plci->dtmf_send_requests)++] = plci->number; plci->internal_command = DTMF_COMMAND_3; dtmf_send_digits (plci, &plci->saved_msg.parms[3].info[1], plci->saved_msg.parms[3].length); return; case DTMF_COMMAND_3: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Send DTMF digits failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); if (plci->dtmf_send_requests != 0) (plci->dtmf_send_requests)--; Info = _FACILITY_NOT_SUPPORTED; break; } return; } break; } sendf (plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number, "wws", Info, SELECTOR_DTMF, result); } static byte dtmf_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg) { word Info; word i, j; byte mask; API_PARSE dtmf_parms[5]; byte result[40]; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_request", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = GOOD; result[0] = 2; PUT_WORD (&result[1], DTMF_SUCCESS); if (!(a->profile.Global_Options & GL_DTMF_SUPPORTED)) { dbug (1, dprintf ("[%06lx] %s,%d: Facility not supported", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; } else if (api_parse (&msg[1].info[1], msg[1].length, "w", dtmf_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; } else if ((GET_WORD (dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES) || (GET_WORD (dtmf_parms[0].info) == DTMF_GET_SUPPORTED_SEND_CODES)) { if (!((a->requested_options_table[appl->Id-1]) & (1L << 
PRIVATE_DTMF_TONE))) { dbug (1, dprintf ("[%06lx] %s,%d: DTMF unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, GET_WORD (dtmf_parms[0].info))); PUT_WORD (&result[1], DTMF_UNKNOWN_REQUEST); } else { for (i = 0; i < 32; i++) result[4 + i] = 0; if (GET_WORD (dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES) { for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++) { if (dtmf_digit_map[i].listen_mask != 0) result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7)); } } else { for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++) { if (dtmf_digit_map[i].send_mask != 0) result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7)); } } result[0] = 3 + 32; result[3] = 32; } } else if (plci == NULL) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong PLCI", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_IDENTIFIER; } else { if (!plci->State || !plci->NL.Id || plci->nl_remove_id) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong state", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_STATE; } else { plci->command = 0; plci->dtmf_cmd = GET_WORD (dtmf_parms[0].info); mask = 0x01; switch (plci->dtmf_cmd) { case DTMF_LISTEN_TONE_START: case DTMF_LISTEN_TONE_STOP: mask <<= 1; case DTMF_LISTEN_MF_START: case DTMF_LISTEN_MF_STOP: mask <<= 1; if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id-1]) & (1L << PRIVATE_DTMF_TONE))) { dbug (1, dprintf ("[%06lx] %s,%d: DTMF unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, GET_WORD (dtmf_parms[0].info))); PUT_WORD (&result[1], DTMF_UNKNOWN_REQUEST); break; } case DTMF_LISTEN_START: case DTMF_LISTEN_STOP: if (!(a->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF) && !(a->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)) { dbug (1, dprintf ("[%06lx] %s,%d: Facility not supported", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (mask & 
DTMF_LISTEN_ACTIVE_FLAG) {
/* Optional pulse/pause timing parameters; absent parameters fall back
   to zero (adapter defaults). */
if (api_parse (&msg[1].info[1], msg[1].length, "wwws", dtmf_parms)) { plci->dtmf_rec_pulse_ms = 0; plci->dtmf_rec_pause_ms = 0; } else { plci->dtmf_rec_pulse_ms = GET_WORD (dtmf_parms[1].info); plci->dtmf_rec_pause_ms = GET_WORD (dtmf_parms[2].info); } } start_internal_command (Id, plci, dtmf_command); return (false);
/* Send cascade mirrors the listen cascade above; fall-throughs intentional. */
case DTMF_SEND_TONE: mask <<= 1;
/* fall through */
case DTMF_SEND_MF: mask <<= 1; if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id-1]) & (1L << PRIVATE_DTMF_TONE))) { dbug (1, dprintf ("[%06lx] %s,%d: DTMF unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, GET_WORD (dtmf_parms[0].info))); PUT_WORD (&result[1], DTMF_UNKNOWN_REQUEST); break; }
/* fall through */
case DTMF_DIGITS_SEND: if (api_parse (&msg[1].info[1], msg[1].length, "wwws", dtmf_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; break; } if (mask & DTMF_LISTEN_ACTIVE_FLAG) { plci->dtmf_send_pulse_ms = GET_WORD (dtmf_parms[1].info); plci->dtmf_send_pause_ms = GET_WORD (dtmf_parms[2].info); }
/* Validate every digit in parms[3] against the digit map for this send
   class; after the loop, j == DTMF_DIGIT_MAP_ENTRIES means the digit at
   info[i] (the last one examined) is not sendable. */
i = 0; j = 0; while ((i < dtmf_parms[3].length) && (j < DTMF_DIGIT_MAP_ENTRIES)) { j = 0; while ((j < DTMF_DIGIT_MAP_ENTRIES) && ((dtmf_parms[3].info[i+1] != dtmf_digit_map[j].character) || ((dtmf_digit_map[j].send_mask & mask) == 0))) { j++; } i++; } if (j == DTMF_DIGIT_MAP_ENTRIES) { dbug (1, dprintf ("[%06lx] %s,%d: Incorrect DTMF digit %02x", UnMapId (Id), (char *)(FILE_), __LINE__, dtmf_parms[3].info[i])); PUT_WORD (&result[1], DTMF_INCORRECT_DIGIT); break; }
/* Guard the per-PLCI confirm-number queue against overrun before the
   request is queued by dtmf_command / DTMF_COMMAND_2. */
if (plci->dtmf_send_requests >= ARRAY_SIZE(plci->dtmf_msg_number_queue)) { dbug (1, dprintf ("[%06lx] %s,%d: DTMF request overrun", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_STATE; break; } api_save_msg (dtmf_parms, "wwws", &plci->saved_msg); start_internal_command (Id, plci, dtmf_command); return (false); default: dbug (1, dprintf ("[%06lx] %s,%d: DTMF unknown request %04x",
UnMapId (Id), (char *)(FILE_), __LINE__, plci->dtmf_cmd)); PUT_WORD (&result[1], DTMF_UNKNOWN_REQUEST); } } } sendf (appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number, "wws", Info, SELECTOR_DTMF, result); return (false); }
/* dtmf_confirmation: confirm completion of the oldest queued digit-send
   request.  Sends the FACILITY confirm with the saved message number,
   then shifts the remaining queue entries down by one. */
static void dtmf_confirmation (dword Id, PLCI *plci) { word Info; word i; byte result[4]; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_confirmation", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = GOOD; result[0] = 2; PUT_WORD (&result[1], DTMF_SUCCESS); if (plci->dtmf_send_requests != 0) { sendf (plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->dtmf_msg_number_queue[0], "wws", GOOD, SELECTOR_DTMF, result); (plci->dtmf_send_requests)--; for (i = 0; i < plci->dtmf_send_requests; i++) plci->dtmf_msg_number_queue[i] = plci->dtmf_msg_number_queue[i+1]; } }
/* dtmf_indication: translate received DTMF/tone codes (msg[1..length-1])
   into CAPI characters in place and deliver them as a FACILITY indication.
   Codes not matching an active listen mask are dropped.  For tone
   listening, an "unidentified tone" marker is inserted before a real tone
   character when the previously indicated code was "no tone"; the array
   is shifted right by one to make room (length grows by one). */
static void dtmf_indication (dword Id, PLCI *plci, byte *msg, word length) { word i, j, n; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_indication", UnMapId (Id), (char *)(FILE_), __LINE__)); n = 0; for (i = 1; i < length; i++) { j = 0; while ((j < DTMF_DIGIT_MAP_ENTRIES) && ((msg[i] != dtmf_digit_map[j].code) || ((dtmf_digit_map[j].listen_mask & plci->dtmf_rec_active) == 0))) { j++; } if (j < DTMF_DIGIT_MAP_ENTRIES) { if ((dtmf_digit_map[j].listen_mask & DTMF_TONE_LISTEN_ACTIVE_FLAG) && (plci->tone_last_indication_code == DTMF_SIGNAL_NO_TONE) && (dtmf_digit_map[j].character != DTMF_SIGNAL_UNIDENTIFIED_TONE)) { if (n + 1 == i) { for (i = length; i > n + 1; i--) msg[i] = msg[i - 1]; length++; i++; } msg[++n] = DTMF_SIGNAL_UNIDENTIFIED_TONE; } plci->tone_last_indication_code = dtmf_digit_map[j].character; msg[++n] = dtmf_digit_map[j].character; } } if (n != 0) { msg[0] = (byte) n; sendf (plci->appl, _FACILITY_I, Id & 0xffffL, 0, "wS", SELECTOR_DTMF, msg); } }
/*------------------------------------------------------------------*/
/* DTMF parameters                                                  */
/*------------------------------------------------------------------*/
/* dtmf_parameter_write: push the buffered DTMF parameter block to the
   DSP via a SET_DTMF_PARAMETERS facility element on the signaling
   channel. */
static void dtmf_parameter_write (PLCI *plci) { word i; byte
parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE + 2]; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_parameter_write", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); parameter_buffer[0] = plci->dtmf_parameter_length + 1; parameter_buffer[1] = DSP_CTRL_SET_DTMF_PARAMETERS; for (i = 0; i < plci->dtmf_parameter_length; i++) parameter_buffer[2+i] = plci->dtmf_parameter_buffer[i]; add_p (plci, FTY, parameter_buffer); sig_req (plci, TEL_CTRL, 0); send_req (plci); }
/* dtmf_parameter_clear_config: drop any buffered DTMF parameters. */
static void dtmf_parameter_clear_config (PLCI *plci) { dbug (1, dprintf ("[%06lx] %s,%d: dtmf_parameter_clear_config", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); plci->dtmf_parameter_length = 0; }
/* dtmf_parameter_prepare_switch: no state to prepare; trace only. */
static void dtmf_parameter_prepare_switch (dword Id, PLCI *plci) { dbug (1, dprintf ("[%06lx] %s,%d: dtmf_parameter_prepare_switch", UnMapId (Id), (char *)(FILE_), __LINE__)); }
/* dtmf_parameter_save_config: nothing to save; always reports GOOD. */
static word dtmf_parameter_save_config (dword Id, PLCI *plci, byte Rc) { dbug (1, dprintf ("[%06lx] %s,%d: dtmf_parameter_save_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); return (GOOD); }
/* dtmf_parameter_restore_config: adjust_b helper that re-writes the saved
   DTMF parameters after a B-channel reconfiguration.  Step _1 waits for a
   free signaling request slot and issues the write; step _2 checks the
   write's return code. */
static word dtmf_parameter_restore_config (dword Id, PLCI *plci, byte Rc) { word Info; dbug (1, dprintf ("[%06lx] %s,%d: dtmf_parameter_restore_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); Info = GOOD; if ((plci->B1_facilities & B1_FACILITY_DTMFR) && (plci->dtmf_parameter_length != 0)) { switch (plci->adjust_b_state) { case ADJUST_B_RESTORE_DTMF_PARAMETER_1: plci->internal_command = plci->adjust_b_command; if (plci->sig_req) { plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1; break; } dtmf_parameter_write (plci); plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_2; break; case ADJUST_B_RESTORE_DTMF_PARAMETER_2: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Restore DTMF parameters failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); Info =
_WRONG_STATE; break; } break; } } return (Info); }
/*------------------------------------------------------------------*/
/* Line interconnect facilities                                     */
/*------------------------------------------------------------------*/
LI_CONFIG *li_config_table; word li_total_channels;
/*------------------------------------------------------------------*/
/* translate a CHI information element to a channel number           */
/* returns 0xff - any channel                                        */
/*         0xfe - chi wrong coding                                   */
/*         0xfd - D-channel                                          */
/*         0x00 - no channel                                         */
/*         else channel number / PRI: timeslot                       */
/* if channels is provided we accept more than one channel.          */
/*------------------------------------------------------------------*/
static byte chi_to_channel (byte *chi, dword *pchannelmap) { int p; int i; dword map; byte excl; byte ofs; byte ch; if (pchannelmap) *pchannelmap = 0; if(!chi[0]) return 0xff; excl = 0;
/* Octet 3 bit 0x20 set => interface type "other" (PRI coding with
   optional interface id, channel number or slot map octets). */
if(chi[1] & 0x20) { if(chi[0]==1 && chi[1]==0xac) return 0xfd; /* exclusive d-channel */ for(i=1; i<chi[0] && !(chi[i] &0x80); i++); if(i==chi[0] || !(chi[i] &0x80)) return 0xfe; if((chi[1] |0xc8)!=0xe9) return 0xfe; if(chi[1] &0x08) excl = 0x40; /* int.
id present */ if(chi[1] &0x40) { p=i+1; for(i=p; i<chi[0] && !(chi[i] &0x80); i++); if(i==chi[0] || !(chi[i] &0x80)) return 0xfe; } /* coding standard, Number/Map, Channel Type */ p=i+1; for(i=p; i<chi[0] && !(chi[i] &0x80); i++); if(i==chi[0] || !(chi[i] &0x80)) return 0xfe; if((chi[p]|0xd0)!=0xd3) return 0xfe; /* Number/Map */ if(chi[p] &0x10) { /* map */ if((chi[0]-p)==4) ofs = 0; else if((chi[0]-p)==3) ofs = 1; else return 0xfe; ch = 0; map = 0; for(i=0; i<4 && p<chi[0]; i++) { p++; ch += 8; map <<= 8; if(chi[p]) { for (ch=0; !(chi[p] & (1 << ch)); ch++); map |= chi[p]; } } ch += ofs; map <<= ofs; } else { /* number */ p=i+1; ch = chi[p] &0x3f; if(pchannelmap) { if((byte)(chi[0]-p)>30) return 0xfe; map = 0; for(i=p; i<=chi[0]; i++) { if ((chi[i] &0x7f) > 31) return 0xfe; map |= (1L << (chi[i] &0x7f)); } } else { if(p!=chi[0]) return 0xfe; if (ch > 31) return 0xfe; map = (1L << ch); } if(chi[p] &0x40) return 0xfe; } if (pchannelmap) *pchannelmap = map; else if (map != ((dword)(1L << ch))) return 0xfe; return (byte)(excl | ch); } else { /* not PRI */ for(i=1; i<chi[0] && !(chi[i] &0x80); i++); if(i!=chi[0] || !(chi[i] &0x80)) return 0xfe; if(chi[1] &0x08) excl = 0x40; switch(chi[1] |0x98) { case 0x98: return 0; case 0x99: if (pchannelmap) *pchannelmap = 2; return excl |1; case 0x9a: if (pchannelmap) *pchannelmap = 4; return excl |2; case 0x9b: return 0xff; case 0x9c: return 0xfd; /* d-ch */ default: return 0xfe; } } }
/* mixer_set_bchannel_id_esc: bind the PLCI to a B-channel slot in
   li_config_table from an escape-coded channel id.  On BRI, when the
   advanced-voice signaling PLCI exists, it is moved onto the other
   B-channel slot so both PLCIs keep distinct slots.  mixer_clear_config
   is invoked when the PLCI acquires a channel for the first time. */
static void mixer_set_bchannel_id_esc (PLCI *plci, byte bchannel_id) { DIVA_CAPI_ADAPTER *a; PLCI *splci; byte old_id; a = plci->adapter; old_id = plci->li_bchannel_id; if (a->li_pri) { if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci)) li_config_table[a->li_base + (old_id - 1)].plci = NULL; plci->li_bchannel_id = (bchannel_id & 0x1f) + 1; if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL) li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci; } else { if (((bchannel_id & 0x03) ==
1) || ((bchannel_id & 0x03) == 2)) { if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci)) li_config_table[a->li_base + (old_id - 1)].plci = NULL; plci->li_bchannel_id = bchannel_id & 0x03; if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE)) { splci = a->AdvSignalPLCI;
/* Move the advanced-voice PLCI to the opposite BRI B-channel
   (3 - id maps 1<->2) if that slot is free. */
if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL) { if ((splci->li_bchannel_id != 0) && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci)) { li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL; } splci->li_bchannel_id = 3 - plci->li_bchannel_id; li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_set_bchannel_id_esc %d", (dword)((splci->Id << 8) | UnMapController (splci->adapter->Id)), (char *)(FILE_), __LINE__, splci->li_bchannel_id)); } } if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL) li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci; } } if ((old_id == 0) && (plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { mixer_clear_config (plci); } dbug (1, dprintf ("[%06lx] %s,%d: mixer_set_bchannel_id_esc %d %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, bchannel_id, plci->li_bchannel_id)); }
/* mixer_set_bchannel_id: same binding as mixer_set_bchannel_id_esc, but
   the channel is derived from a CHI information element via
   chi_to_channel; values with bit 0x80 set (errors/D-channel) are
   ignored. */
static void mixer_set_bchannel_id (PLCI *plci, byte *chi) { DIVA_CAPI_ADAPTER *a; PLCI *splci; byte ch, old_id; a = plci->adapter; old_id = plci->li_bchannel_id; ch = chi_to_channel (chi, NULL); if (!(ch & 0x80)) { if (a->li_pri) { if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci)) li_config_table[a->li_base + (old_id - 1)].plci = NULL; plci->li_bchannel_id = (ch & 0x1f) + 1; if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL) li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci; } else { if (((ch &
0x1f) == 1) || ((ch & 0x1f) == 2)) { if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci)) li_config_table[a->li_base + (old_id - 1)].plci = NULL; plci->li_bchannel_id = ch & 0x1f; if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE)) { splci = a->AdvSignalPLCI; if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL) { if ((splci->li_bchannel_id != 0) && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci)) { li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL; } splci->li_bchannel_id = 3 - plci->li_bchannel_id; li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_set_bchannel_id %d", (dword)((splci->Id << 8) | UnMapController (splci->adapter->Id)), (char *)(FILE_), __LINE__, splci->li_bchannel_id)); } } if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL) li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci; } } } if ((old_id == 0) && (plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { mixer_clear_config (plci); } dbug (1, dprintf ("[%06lx] %s,%d: mixer_set_bchannel_id %02x %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, ch, plci->li_bchannel_id)); }
#define MIXER_MAX_DUMP_CHANNELS 34
/* mixer_calculate_coefs: recompute the line-interconnect coefficient
   matrix for the whole li_config_table from the per-channel flag_table /
   chflags requests, then dump the resulting state to the debug trace.
   Phases: mark involved/conference channels; seed CH_CH coefs from
   conference flags and propagate them transitively; derive CH/PC coefs
   from interconnect/monitor/mix/loop flags; set ACTIVE/RX/TX channel
   bits; finally handle announcement channels (PC_CH only). */
static void mixer_calculate_coefs (DIVA_CAPI_ADAPTER *a) { static char hex_digit_table[0x10] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; word n, i, j; char *p; char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4]; dbug (1, dprintf ("[%06lx] %s,%d: mixer_calculate_coefs", (dword)(UnMapController (a->Id)), (char *)(FILE_), __LINE__)); for (i = 0; i < li_total_channels; i++) { li_config_table[i].channel &= LI_CHANNEL_ADDRESSES_SET; if (li_config_table[i].chflags != 0) li_config_table[i].channel |=
LI_CHANNEL_INVOLVED; else { for (j = 0; j < li_total_channels; j++) { if (((li_config_table[i].flag_table[j]) != 0) || ((li_config_table[j].flag_table[i]) != 0)) { li_config_table[i].channel |= LI_CHANNEL_INVOLVED; } if (((li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) != 0) || ((li_config_table[j].flag_table[i] & LI_FLAG_CONFERENCE) != 0)) { li_config_table[i].channel |= LI_CHANNEL_CONFERENCE; } } } }
/* Reset the low coef nibbles and seed CH_CH from conference flags. */
for (i = 0; i < li_total_channels; i++) { for (j = 0; j < li_total_channels; j++) { li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC); if (li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) li_config_table[i].coef_table[j] |= LI_COEF_CH_CH; } }
/* Transitive closure over conference members (Floyd-Warshall style):
   i hears j if both can reach each other through conference pivot n. */
for (n = 0; n < li_total_channels; n++) { if (li_config_table[n].channel & LI_CHANNEL_CONFERENCE) { for (i = 0; i < li_total_channels; i++) { if (li_config_table[i].channel & LI_CHANNEL_CONFERENCE) { for (j = 0; j < li_total_channels; j++) { li_config_table[i].coef_table[j] |= li_config_table[i].coef_table[n] & li_config_table[n].coef_table[j]; } } } } } for (i = 0; i < li_total_channels; i++) { if (li_config_table[i].channel & LI_CHANNEL_INVOLVED) { li_config_table[i].coef_table[i] &= ~LI_COEF_CH_CH; for (j = 0; j < li_total_channels; j++) { if (li_config_table[i].coef_table[j] & LI_COEF_CH_CH) li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE; } if (li_config_table[i].flag_table[i] & LI_FLAG_CONFERENCE) li_config_table[i].coef_table[i] |= LI_COEF_CH_CH; } }
/* Map the explicit per-pair flags onto the four coef directions. */
for (i = 0; i < li_total_channels; i++) { if (li_config_table[i].channel & LI_CHANNEL_INVOLVED) { for (j = 0; j < li_total_channels; j++) { if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT) li_config_table[i].coef_table[j] |= LI_COEF_CH_CH; if (li_config_table[i].flag_table[j] & LI_FLAG_MONITOR) li_config_table[i].coef_table[j] |= LI_COEF_CH_PC; if (li_config_table[i].flag_table[j] & LI_FLAG_MIX) li_config_table[i].coef_table[j] |= LI_COEF_PC_CH; if (li_config_table[i].flag_table[j] &
LI_FLAG_PCCONNECT) li_config_table[i].coef_table[j] |= LI_COEF_PC_PC; }
/* Channel-global monitor/mix/loop flags expand to additional coefs. */
if (li_config_table[i].chflags & LI_CHFLAG_MONITOR) { for (j = 0; j < li_total_channels; j++) { if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT) { li_config_table[i].coef_table[j] |= LI_COEF_CH_PC; if (li_config_table[j].chflags & LI_CHFLAG_MIX) li_config_table[i].coef_table[j] |= LI_COEF_PC_CH | LI_COEF_PC_PC; } } } if (li_config_table[i].chflags & LI_CHFLAG_MIX) { for (j = 0; j < li_total_channels; j++) { if (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT) li_config_table[j].coef_table[i] |= LI_COEF_PC_CH; } } if (li_config_table[i].chflags & LI_CHFLAG_LOOP) { for (j = 0; j < li_total_channels; j++) { if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT) { for (n = 0; n < li_total_channels; n++) { if (li_config_table[n].flag_table[i] & LI_FLAG_INTERCONNECT) { li_config_table[n].coef_table[j] |= LI_COEF_CH_CH; if (li_config_table[j].chflags & LI_CHFLAG_MIX) { li_config_table[n].coef_table[j] |= LI_COEF_PC_CH; if (li_config_table[n].chflags & LI_CHFLAG_MONITOR) li_config_table[n].coef_table[j] |= LI_COEF_CH_PC | LI_COEF_PC_PC; } else if (li_config_table[n].chflags & LI_CHFLAG_MONITOR) li_config_table[n].coef_table[j] |= LI_COEF_CH_PC; } } } } } } }
/* Derive ACTIVE / RX_DATA / TX_DATA channel state from the flags;
   uninvolved-but-flagged channels get a default PC loopback. */
for (i = 0; i < li_total_channels; i++) { if (li_config_table[i].channel & LI_CHANNEL_INVOLVED) { if (li_config_table[i].chflags & (LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP)) li_config_table[i].channel |= LI_CHANNEL_ACTIVE; if (li_config_table[i].chflags & LI_CHFLAG_MONITOR) li_config_table[i].channel |= LI_CHANNEL_RX_DATA; if (li_config_table[i].chflags & LI_CHFLAG_MIX) li_config_table[i].channel |= LI_CHANNEL_TX_DATA; for (j = 0; j < li_total_channels; j++) { if ((li_config_table[i].flag_table[j] & (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_MONITOR)) || (li_config_table[j].flag_table[i] & (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_ANNOUNCEMENT |
LI_FLAG_MIX))) { li_config_table[i].channel |= LI_CHANNEL_ACTIVE; } if (li_config_table[i].flag_table[j] & (LI_FLAG_PCCONNECT | LI_FLAG_MONITOR)) li_config_table[i].channel |= LI_CHANNEL_RX_DATA; if (li_config_table[j].flag_table[i] & (LI_FLAG_PCCONNECT | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX)) li_config_table[i].channel |= LI_CHANNEL_TX_DATA; } if (!(li_config_table[i].channel & LI_CHANNEL_ACTIVE)) { li_config_table[i].coef_table[i] |= LI_COEF_PC_CH | LI_COEF_CH_PC; li_config_table[i].channel |= LI_CHANNEL_TX_DATA | LI_CHANNEL_RX_DATA; } } }
/* Announcement sources override everything: only PC_CH towards the
   announced peers remains. */
for (i = 0; i < li_total_channels; i++) { if (li_config_table[i].channel & LI_CHANNEL_INVOLVED) { j = 0; while ((j < li_total_channels) && !(li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT)) j++; if (j < li_total_channels) { for (j = 0; j < li_total_channels; j++) { li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_PC_CH); if (li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT) li_config_table[i].coef_table[j] |= LI_COEF_PC_CH; } } } }
/* Debug dump of the first MIXER_MAX_DUMP_CHANNELS channels, one hex
   byte per channel, a blank every 8 channels. */
n = li_total_channels; if (n > MIXER_MAX_DUMP_CHANNELS) n = MIXER_MAX_DUMP_CHANNELS; p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; *(p++) = hex_digit_table[li_config_table[j].curchnl >> 4]; *(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf]; } *p = '\0'; dbug (1, dprintf ("[%06lx] CURRENT %s", (dword)(UnMapController (a->Id)), (char *) hex_line)); p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; *(p++) = hex_digit_table[li_config_table[j].channel >> 4]; *(p++) = hex_digit_table[li_config_table[j].channel & 0xf]; } *p = '\0'; dbug (1, dprintf ("[%06lx] CHANNEL %s", (dword)(UnMapController (a->Id)), (char *) hex_line)); p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; *(p++) = hex_digit_table[li_config_table[j].chflags >> 4]; *(p++) = hex_digit_table[li_config_table[j].chflags & 0xf]; } *p = '\0'; dbug (1, dprintf ("[%06lx] CHFLAG %s", (dword)(UnMapController (a->Id)), (char *) hex_line));
for (i = 0; i < n; i++) { p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; *(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4]; *(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf]; } *p = '\0'; dbug (1, dprintf ("[%06lx] FLAG[%02x]%s", (dword)(UnMapController (a->Id)), i, (char *) hex_line)); } for (i = 0; i < n; i++) { p = hex_line; for (j = 0; j < n; j++) { if ((j & 0x7) == 0) *(p++) = ' '; *(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4]; *(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf]; } *p = '\0'; dbug (1, dprintf ("[%06lx] COEF[%02x]%s", (dword)(UnMapController (a->Id)), i, (char *) hex_line)); } }
/* Per-direction programming order for PRI mixer coefficient rows. */
static struct { byte mask; byte line_flags; } mixer_write_prog_pri[] = { { LI_COEF_CH_CH, 0 }, { LI_COEF_CH_PC, MIXER_COEF_LINE_TO_PC_FLAG }, { LI_COEF_PC_CH, MIXER_COEF_LINE_FROM_PC_FLAG }, { LI_COEF_PC_PC, MIXER_COEF_LINE_TO_PC_FLAG | MIXER_COEF_LINE_FROM_PC_FLAG } };
/* BRI coefficient program: fixed order of (from, to, direction) entries
   matching the adapter's BRI mixer coefficient layout; a non-zero
   xconnect_override is written verbatim instead of the computed value. */
static struct { byte from_ch; byte to_ch; byte mask; byte xconnect_override; } mixer_write_prog_bri[] = { { 0, 0, LI_COEF_CH_CH, 0x01 }, /* B to B */ { 1, 0, LI_COEF_CH_CH, 0x01 }, /* Alt B to B */ { 0, 0, LI_COEF_PC_CH, 0x80 }, /* PC to B */ { 1, 0, LI_COEF_PC_CH, 0x01 }, /* Alt PC to B */ { 2, 0, LI_COEF_CH_CH, 0x00 }, /* IC to B */ { 3, 0, LI_COEF_CH_CH, 0x00 }, /* Alt IC to B */ { 0, 0, LI_COEF_CH_PC, 0x80 }, /* B to PC */ { 1, 0, LI_COEF_CH_PC, 0x01 }, /* Alt B to PC */ { 0, 0, LI_COEF_PC_PC, 0x01 }, /* PC to PC */ { 1, 0, LI_COEF_PC_PC, 0x01 }, /* Alt PC to PC */ { 2, 0, LI_COEF_CH_PC, 0x00 }, /* IC to PC */ { 3, 0, LI_COEF_CH_PC, 0x00 }, /* Alt IC to PC */ { 0, 2, LI_COEF_CH_CH, 0x00 }, /* B to IC */ { 1, 2, LI_COEF_CH_CH, 0x00 }, /* Alt B to IC */ { 0, 2, LI_COEF_PC_CH, 0x00 }, /* PC to IC */ { 1, 2, LI_COEF_PC_CH, 0x00 }, /* Alt PC to IC */ { 2, 2, LI_COEF_CH_CH, 0x00 }, /* IC to IC */ { 3, 2, LI_COEF_CH_CH, 0x00 }, /* Alt IC to IC */ { 1, 1, LI_COEF_CH_CH, 0x01 }, /* Alt B to Alt B */ { 0, 1, LI_COEF_CH_CH,
0x01 }, /* B to Alt B */ { 1, 1, LI_COEF_PC_CH, 0x80 }, /* Alt PC to Alt B */ { 0, 1, LI_COEF_PC_CH, 0x01 }, /* PC to Alt B */ { 3, 1, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt B */ { 2, 1, LI_COEF_CH_CH, 0x00 }, /* IC to Alt B */ { 1, 1, LI_COEF_CH_PC, 0x80 }, /* Alt B to Alt PC */ { 0, 1, LI_COEF_CH_PC, 0x01 }, /* B to Alt PC */ { 1, 1, LI_COEF_PC_PC, 0x01 }, /* Alt PC to Alt PC */ { 0, 1, LI_COEF_PC_PC, 0x01 }, /* PC to Alt PC */ { 3, 1, LI_COEF_CH_PC, 0x00 }, /* Alt IC to Alt PC */ { 2, 1, LI_COEF_CH_PC, 0x00 }, /* IC to Alt PC */ { 1, 3, LI_COEF_CH_CH, 0x00 }, /* Alt B to Alt IC */ { 0, 3, LI_COEF_CH_CH, 0x00 }, /* B to Alt IC */ { 1, 3, LI_COEF_PC_CH, 0x00 }, /* Alt PC to Alt IC */ { 0, 3, LI_COEF_PC_CH, 0x00 }, /* PC to Alt IC */ { 3, 3, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt IC */ { 2, 3, LI_COEF_CH_CH, 0x00 } /* IC to Alt IC */ };
/* Index remapping used when the PLCI sits on the second (Alt) BRI
   B-channel: entry n of mixer_write_prog_bri maps to its swapped
   counterpart so B and Alt B roles are exchanged. */
static byte mixer_swapped_index_bri[] = { 18, /* B to B */ 19, /* Alt B to B */ 20, /* PC to B */ 21, /* Alt PC to B */ 22, /* IC to B */ 23, /* Alt IC to B */ 24, /* B to PC */ 25, /* Alt B to PC */ 26, /* PC to PC */ 27, /* Alt PC to PC */ 28, /* IC to PC */ 29, /* Alt IC to PC */ 30, /* B to IC */ 31, /* Alt B to IC */ 32, /* PC to IC */ 33, /* Alt PC to IC */ 34, /* IC to IC */ 35, /* Alt IC to IC */ 0, /* Alt B to Alt B */ 1, /* B to Alt B */ 2, /* Alt PC to Alt B */ 3, /* PC to Alt B */ 4, /* Alt IC to Alt B */ 5, /* IC to Alt B */ 6, /* Alt B to Alt PC */ 7, /* B to Alt PC */ 8, /* Alt PC to Alt PC */ 9, /* PC to Alt PC */ 10, /* Alt IC to Alt PC */ 11, /* IC to Alt PC */ 12, /* Alt B to Alt IC */ 13, /* B to Alt IC */ 14, /* Alt PC to Alt IC */ 15, /* PC to Alt IC */ 16, /* Alt IC to Alt IC */ 17 /* IC to Alt IC */ };
/* The four xconnect transfer directions and whether each endpoint is
   the PC (host) side or the B (line) side. */
static struct { byte mask; byte from_pc; byte to_pc; } xconnect_write_prog[] = { { LI_COEF_CH_CH, false, false }, { LI_COEF_CH_PC, false, true }, { LI_COEF_PC_CH, true, false }, { LI_COEF_PC_PC, true, true } };
/* xconnect_query_addresses: ask the adapter (via an N_UDATA request) for
   the DMA transfer addresses of this PLCI's B-channel, both line side
   and PC side. */
static void xconnect_query_addresses (PLCI *plci) { DIVA_CAPI_ADAPTER *a; word w, ch; byte *p; dbug
(1, dprintf ("[%06lx] %s,%d: xconnect_query_addresses", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); a = plci->adapter; if (a->li_pri && ((plci->li_bchannel_id == 0) || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))) { dbug (1, dprintf ("[%06x] %s,%d: Channel id wiped out", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); return; } p = plci->internal_req_buffer; ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0; *(p++) = UDATA_REQUEST_XCONNECT_FROM; w = ch; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); w = ch | XCONNECT_CHANNEL_PORT_PC; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); plci->NData[0].P = plci->internal_req_buffer; plci->NData[0].PLength = p - plci->internal_req_buffer; plci->NL.X = plci->NData; plci->NL.ReqCh = 0; plci->NL.Req = plci->nl_req = (byte) N_UDATA; plci->adapter->request (&plci->NL); }
/* xconnect_write_coefs: arm xconnect_write_coefs_process by recording
   the internal command to run and resetting the channel write cursor. */
static void xconnect_write_coefs (PLCI *plci, word internal_command) { dbug (1, dprintf ("[%06lx] %s,%d: xconnect_write_coefs %04x", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, internal_command)); plci->li_write_command = internal_command; plci->li_write_channel = 0; }
/* xconnect_write_coefs_process: incrementally program the mixer /
   cross-connect coefficients for this PLCI's channel.  li_write_channel
   is the resume cursor; each invocation builds one N_UDATA request
   (XCONNECT_TO entries on DMA-capable adapters, otherwise classic
   PRI/BRI mixer coefficient blocks) and returns true when processing
   may continue, false on a failed previous write.
   NOTE(review): some traces here use "[%06x]" while the rest of the file
   uses "[%06lx]" for the same dword id -- verify on 64-bit builds. */
static byte xconnect_write_coefs_process (dword Id, PLCI *plci, byte Rc) { DIVA_CAPI_ADAPTER *a; word w, n, i, j, r, s, to_ch; dword d; byte *p; struct xconnect_transfer_address_s *transfer_address; byte ch_map[MIXER_CHANNELS_BRI]; dbug (1, dprintf ("[%06x] %s,%d: xconnect_write_coefs_process %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->li_write_channel)); a = plci->adapter; if ((plci->li_bchannel_id == 0) || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci)) { dbug (1, dprintf ("[%06x] %s,%d: Channel id wiped out", UnMapId (Id), (char *)(FILE_), __LINE__)); return (true); } i = a->li_base + (plci->li_bchannel_id - 1); j = plci->li_write_channel; p = plci->internal_req_buffer; if (j != 0) { if ((Rc !=
OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: LI write coefs failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); return (false); } }
/* DMA cross-connect capable adapter: walk coef_table[j] looking for
   entries whose written state (high nibble) differs from the wanted
   state (low nibble); skip peers without addresses, BRI slots beyond
   the B-channels, and cross-adapter peers that cannot be reached. */
if (li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) { r = 0; s = 0; if (j < li_total_channels) { if (li_config_table[i].channel & LI_CHANNEL_ADDRESSES_SET) { s = ((li_config_table[i].send_b.card_address.low | li_config_table[i].send_b.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_PC | LI_COEF_PC_PC)) & ((li_config_table[i].send_pc.card_address.low | li_config_table[i].send_pc.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_PC_CH)); } r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); while ((j < li_total_channels) && ((r == 0) || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET)) || (!li_config_table[j].adapter->li_pri && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI)) || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low) || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high)) && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT) || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT))) || ((li_config_table[j].adapter->li_base != a->li_base) && !(r & s & ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) & ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC)))))) { j++; if (j < li_total_channels) r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); } } if (j < li_total_channels) { plci->internal_command = plci->li_write_command; if (plci_nl_busy (plci)) return (true);
/* Emit one XCONNECT_TO request: for each pending direction append the
   12-byte transfer address, the destination channel word and the gain
   byte (0x01 = off, 0x80 = unity, 0x86/0x7a = u-law/a-law conversion),
   then mark the direction written by XORing it into the high nibble. */
to_ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0; *(p++) = UDATA_REQUEST_XCONNECT_TO; do { if (li_config_table[j].adapter->li_base != a->li_base) { r &= s & ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) & ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC)); } n = 0; do { if (r & xconnect_write_prog[n].mask) { if (xconnect_write_prog[n].from_pc) transfer_address = &(li_config_table[j].send_pc); else transfer_address = &(li_config_table[j].send_b); d = transfer_address->card_address.low; *(p++) = (byte) d; *(p++) = (byte)(d >> 8); *(p++) = (byte)(d >> 16); *(p++) = (byte)(d >> 24); d = transfer_address->card_address.high; *(p++) = (byte) d; *(p++) = (byte)(d >> 8); *(p++) = (byte)(d >> 16); *(p++) = (byte)(d >> 24); d = transfer_address->offset; *(p++) = (byte) d; *(p++) = (byte)(d >> 8); *(p++) = (byte)(d >> 16); *(p++) = (byte)(d >> 24); w = xconnect_write_prog[n].to_pc ? to_ch | XCONNECT_CHANNEL_PORT_PC : to_ch; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); w = ((li_config_table[i].coef_table[j] & xconnect_write_prog[n].mask) == 0) ? 0x01 : (li_config_table[i].adapter->u_law ? (li_config_table[j].adapter->u_law ? 0x80 : 0x86) : (li_config_table[j].adapter->u_law ?
0x7a : 0x80)); *(p++) = (byte) w; *(p++) = (byte) 0; li_config_table[i].coef_table[j] ^= xconnect_write_prog[n].mask << 4; } n++; } while ((n < ARRAY_SIZE(xconnect_write_prog)) && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE)); if (n == ARRAY_SIZE(xconnect_write_prog)) {
/* Advance j to the next peer with pending work (same skip conditions
   as the scan above). */
do { j++; if (j < li_total_channels) r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); } while ((j < li_total_channels) && ((r == 0) || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET)) || (!li_config_table[j].adapter->li_pri && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI)) || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low) || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high)) && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT) || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT))) || ((li_config_table[j].adapter->li_base != a->li_base) && !(r & s & ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ? (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) & ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC)))))); } } while ((j < li_total_channels) && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE)); } else if (j == li_total_channels) {
/* All peers written: send the final mixer-feature word (and, on BRI,
   the full per-slot coefficient block with xconnect overrides). */
plci->internal_command = plci->li_write_command; if (plci_nl_busy (plci)) return (true); if (a->li_pri) { *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC; w = 0; if (li_config_table[i].channel & LI_CHANNEL_TX_DATA) w |= MIXER_FEATURE_ENABLE_TX_DATA; if (li_config_table[i].channel & LI_CHANNEL_RX_DATA) w |= MIXER_FEATURE_ENABLE_RX_DATA; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); } else { *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI; w = 0; if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI) && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)) { w = GET_WORD (a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE); } if (li_config_table[i].channel & LI_CHANNEL_TX_DATA) w |= MIXER_FEATURE_ENABLE_TX_DATA; if (li_config_table[i].channel & LI_CHANNEL_RX_DATA) w |= MIXER_FEATURE_ENABLE_RX_DATA; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); for (j = 0; j < sizeof(ch_map); j += 2) { if (plci->li_bchannel_id == 2) { ch_map[j] = (byte)(j+1); ch_map[j+1] = (byte) j; } else { ch_map[j] = (byte) j; ch_map[j+1] = (byte)(j+1); } } for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++) { i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch]; j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch]; if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED) { *p = (mixer_write_prog_bri[n].xconnect_override != 0) ? mixer_write_prog_bri[n].xconnect_override : ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ?
0x80 : 0x01); if ((i >= a->li_base + MIXER_BCHANNELS_BRI) || (j >= a->li_base + MIXER_BCHANNELS_BRI)) { w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4; } } else {
/* Uninvolved pair: write 0, unless the advanced-voice PLCI supplies
   its own coefficient byte for this slot. */
*p = 0x00; if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE)) { w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n]; if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length) *p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w]; } } p++; } } j = li_total_channels + 1; } } else {
/* Non-XCONNECT adapter: program the classic mixer coefficient matrix
   in a single request (PRI: one row and one column per direction,
   BRI: the fixed mixer_write_prog_bri block). */
if (j <= li_total_channels) { plci->internal_command = plci->li_write_command; if (plci_nl_busy (plci)) return (true); if (j < a->li_base) j = a->li_base; if (a->li_pri) { *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC; w = 0; if (li_config_table[i].channel & LI_CHANNEL_TX_DATA) w |= MIXER_FEATURE_ENABLE_TX_DATA; if (li_config_table[i].channel & LI_CHANNEL_RX_DATA) w |= MIXER_FEATURE_ENABLE_RX_DATA; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); for (n = 0; n < ARRAY_SIZE(mixer_write_prog_pri); n++) { *(p++) = (byte)((plci->li_bchannel_id - 1) | mixer_write_prog_pri[n].line_flags); for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++) { w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); if (w & mixer_write_prog_pri[n].mask) { *(p++) = (li_config_table[i].coef_table[j] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01; li_config_table[i].coef_table[j] ^= mixer_write_prog_pri[n].mask << 4; } else *(p++) = 0x00; } *(p++) = (byte)((plci->li_bchannel_id - 1) | MIXER_COEF_LINE_ROW_FLAG | mixer_write_prog_pri[n].line_flags); for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++) { w = ((li_config_table[j].coef_table[i] & 0xf) ^ (li_config_table[j].coef_table[i] >> 4)); if (w & mixer_write_prog_pri[n].mask) { *(p++) = (li_config_table[j].coef_table[i] & mixer_write_prog_pri[n].mask) ?
0x80 : 0x01; li_config_table[j].coef_table[i] ^= mixer_write_prog_pri[n].mask << 4; } else *(p++) = 0x00; } } } else { *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI; w = 0; if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI) && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)) { w = GET_WORD (a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE); } if (li_config_table[i].channel & LI_CHANNEL_TX_DATA) w |= MIXER_FEATURE_ENABLE_TX_DATA; if (li_config_table[i].channel & LI_CHANNEL_RX_DATA) w |= MIXER_FEATURE_ENABLE_RX_DATA; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); for (j = 0; j < sizeof(ch_map); j += 2) { if (plci->li_bchannel_id == 2) { ch_map[j] = (byte)(j+1); ch_map[j+1] = (byte) j; } else { ch_map[j] = (byte) j; ch_map[j+1] = (byte)(j+1); } } for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++) { i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch]; j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch]; if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED) { *p = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01); w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4; } else { *p = 0x00; if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE)) { w = (plci == a->AdvSignalPLCI) ?
n : mixer_swapped_index_bri[n]; if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length) *p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w]; } } p++; } } j = li_total_channels + 1; } } plci->li_write_channel = j; if (p != plci->internal_req_buffer) { plci->NData[0].P = plci->internal_req_buffer; plci->NData[0].PLength = p - plci->internal_req_buffer; plci->NL.X = plci->NData; plci->NL.ReqCh = 0; plci->NL.Req = plci->nl_req = (byte) N_UDATA; plci->adapter->request (&plci->NL); } return (true); }
/*
 * mixer_notify_update - ask interconnect-capable PLCIs to refresh their
 * mixer state.  Builds a FACILITY_R message (Selector
 * SELECTOR_LINE_INTERCONNECT, function LI_REQ_SILENT_UPDATE) in a local
 * buffer and submits it with api_put(), either to this PLCI only
 * (others == 0) or to every PLCI found in li_config_table (others != 0).
 * li_notify_update acts as a "notification pending" latch per PLCI: it is
 * set before api_put() and cleared again unless the queue was full
 * (_QUEUE_FULL), so a full queue leaves the latch set for a later retry.
 */
static void mixer_notify_update (PLCI *plci, byte others) { DIVA_CAPI_ADAPTER *a; word i, w; PLCI *notify_plci; byte msg[sizeof(CAPI_MSG_HEADER) + 6]; dbug (1, dprintf ("[%06lx] %s,%d: mixer_notify_update %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, others)); a = plci->adapter; if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED) { if (others) plci->li_notify_update = true; i = 0; do { notify_plci = NULL; if (others) { while ((i < li_total_channels) && (li_config_table[i].plci == NULL)) i++; if (i < li_total_channels) notify_plci = li_config_table[i++].plci; } else { if ((plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { notify_plci = plci; } } if ((notify_plci != NULL) && !notify_plci->li_notify_update && (notify_plci->appl != NULL) && (notify_plci->State) && notify_plci->NL.Id && !notify_plci->nl_remove_id) { notify_plci->li_notify_update = true; ((CAPI_MSG *) msg)->header.length = 18; ((CAPI_MSG *) msg)->header.appl_id = notify_plci->appl->Id; ((CAPI_MSG *) msg)->header.command = _FACILITY_R; ((CAPI_MSG *) msg)->header.number = 0; ((CAPI_MSG *) msg)->header.controller = notify_plci->adapter->Id; ((CAPI_MSG *) msg)->header.plci = notify_plci->Id; ((CAPI_MSG *) msg)->header.ncci = 0; ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
PUT_WORD (&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; w = api_put (notify_plci->appl, (CAPI_MSG *) msg); if (w != _QUEUE_FULL) { if (w != 0) { dbug (1, dprintf ("[%06lx] %s,%d: Interconnect notify failed %06x %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, (dword)((notify_plci->Id << 8) | UnMapController (notify_plci->adapter->Id)), w)); } notify_plci->li_notify_update = false; } } } while (others && (notify_plci != NULL)); if (others) plci->li_notify_update = false; } }
/*
 * mixer_clear_config - wipe this PLCI's line-interconnect state.
 * Resets the B-channel queue positions and zeroes this channel's row and
 * column in the flag_table/coef_table matrices plus its curchnl/channel/
 * chflags.  On BRI (!a->li_pri) the CH<->PC "set" bits are re-armed, and
 * when the PLCI drives the advanced-voice codec (ADV_VOICE on the
 * AdvSignalPLCI) the corresponding IC channel entry - and, with a slave
 * codec, the partner IC channel entry - is cleared the same way.
 */
static void mixer_clear_config (PLCI *plci) { DIVA_CAPI_ADAPTER *a; word i, j; dbug (1, dprintf ("[%06lx] %s,%d: mixer_clear_config", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); plci->li_notify_update = false; plci->li_plci_b_write_pos = 0; plci->li_plci_b_read_pos = 0; plci->li_plci_b_req_pos = 0; a = plci->adapter; if ((plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { i = a->li_base + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[j].flag_table[i] = 0; li_config_table[i].flag_table[j] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } if (!a->li_pri) { li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET; if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)) { i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } if (a->manufacturer_features &
MANUFACTURER_FEATURE_SLAVE_CODEC) { i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } } } } } }
/*
 * mixer_prepare_switch - drain all queued coefficient indications
 * (li_plci_b queue) before a B-channel switch, by calling
 * mixer_indication_coefs_set() until read position catches up with the
 * request position.
 */
static void mixer_prepare_switch (dword Id, PLCI *plci) { dbug (1, dprintf ("[%06lx] %s,%d: mixer_prepare_switch", UnMapId (Id), (char *)(FILE_), __LINE__)); do { mixer_indication_coefs_set (Id, plci); } while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos); }
/*
 * mixer_save_config - snapshot coefficients before a B1 resource change.
 * Keeps only the low nibble of each coef_table entry in this channel's
 * row and column (the high nibble apparently tracks "written to hardware"
 * state - TODO confirm against xconnect_write_coefs_process), then on BRI
 * re-arms the CH<->PC set bits.  Always returns GOOD.
 */
static word mixer_save_config (dword Id, PLCI *plci, byte Rc) { DIVA_CAPI_ADAPTER *a; word i, j; dbug (1, dprintf ("[%06lx] %s,%d: mixer_save_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); a = plci->adapter; if ((plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { i = a->li_base + (plci->li_bchannel_id - 1); for (j = 0; j < li_total_channels; j++) { li_config_table[i].coef_table[j] &= 0xf; li_config_table[j].coef_table[i] &= 0xf; } if (!a->li_pri) li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET; } return (GOOD); }
/*
 * mixer_restore_config - re-program mixer coefficients after a B1 resource
 * change, driven by the ADJUST_B_RESTORE_MIXER_1..7 sub-state machine in
 * plci->adjust_b_state.  On xconnect-capable adapters it first queries the
 * connection addresses (states 1-4), then writes the coefficients
 * (states 5-6).  The case labels below fall through intentionally so that
 * consecutive states execute in a single call when possible.  Returns
 * GOOD, or _WRONG_STATE / _FACILITY_NOT_SUPPORTED on failure.
 */
static word mixer_restore_config (dword Id, PLCI *plci, byte Rc) { DIVA_CAPI_ADAPTER *a; word Info; dbug (1, dprintf ("[%06lx] %s,%d: mixer_restore_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); Info = GOOD; a = plci->adapter; if ((plci->B1_facilities & B1_FACILITY_MIXER) && (plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { switch (plci->adjust_b_state) { case ADJUST_B_RESTORE_MIXER_1: if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) { plci->internal_command = plci->adjust_b_command; if (plci_nl_busy (plci)) { plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1; break; }
/* continuation of mixer_restore_config(): remaining ADJUST_B_RESTORE_MIXER
 * states; the case labels fall through intentionally. */
xconnect_query_addresses (plci); plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_2; break; } plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5; Rc = OK; case ADJUST_B_RESTORE_MIXER_2: case ADJUST_B_RESTORE_MIXER_3: case ADJUST_B_RESTORE_MIXER_4: if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0)) { dbug (1, dprintf ("[%06lx] %s,%d: Adjust B query addresses failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); Info = _WRONG_STATE; break; } if (Rc == OK) { if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2) plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_3; else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4) plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5; } else if (Rc == 0) { if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2) plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_4; else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3) plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5; } if (plci->adjust_b_state != ADJUST_B_RESTORE_MIXER_5) { plci->internal_command = plci->adjust_b_command; break; } case ADJUST_B_RESTORE_MIXER_5: xconnect_write_coefs (plci, plci->adjust_b_command); plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6; Rc = OK; case ADJUST_B_RESTORE_MIXER_6: if (!xconnect_write_coefs_process (Id, plci, Rc)) { dbug (1, dprintf ("[%06lx] %s,%d: Write mixer coefs failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) break; plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_7; case ADJUST_B_RESTORE_MIXER_7: break; } } return (Info); }
/*
 * mixer_command - internal-command state machine behind the queued
 * line-interconnect requests (LI_REQ_CONNECT / LI_REQ_DISCONNECT /
 * LI_REQ_SILENT_UPDATE stored in plci->li_cmd).  Three stages, with
 * intentional fall-through between the case labels:
 *   MIXER_COMMAND_1 - if the channel is involved in an interconnect,
 *     load the B1 mixer facility via adjust_b1_resource()/adjust_b_process();
 *   MIXER_COMMAND_2 - write the coefficients (xconnect_write_coefs /
 *     xconnect_write_coefs_process); on failure the not-yet-requested
 *     queue entries are rolled back;
 *   MIXER_COMMAND_3 - if no longer involved, unload the mixer facility.
 * Afterwards curchnl is brought in sync with li_channel_bits for the
 * channel (and, on BRI advanced-voice, for the IC channel entries).
 * Returns early (without the curchnl update) while a sub-command is still
 * pending.
 */
static void mixer_command (dword Id, PLCI *plci, byte Rc) { DIVA_CAPI_ADAPTER *a; word i, internal_command, Info; dbug (1, dprintf ("[%06lx] %s,%d: mixer_command %02x %04x %04x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command, plci->li_cmd)); Info = GOOD; a = plci->adapter; internal_command = plci->internal_command; plci->internal_command = 0; switch (plci->li_cmd) { case LI_REQ_CONNECT: case LI_REQ_DISCONNECT: case LI_REQ_SILENT_UPDATE: switch
(internal_command) { default: if (plci->li_channel_bits & LI_CHANNEL_INVOLVED) { adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities | B1_FACILITY_MIXER), MIXER_COMMAND_1); } case MIXER_COMMAND_1: if (plci->li_channel_bits & LI_CHANNEL_INVOLVED) { if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Load mixer failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; } plci->li_plci_b_req_pos = plci->li_plci_b_write_pos; if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED) || ((get_b1_facilities (plci, plci->B1_resource) & B1_FACILITY_MIXER) && (add_b1_facilities (plci, plci->B1_resource, (word)(plci->B1_facilities & ~B1_FACILITY_MIXER)) == plci->B1_resource))) { xconnect_write_coefs (plci, MIXER_COMMAND_2); } else { do { mixer_indication_coefs_set (Id, plci); } while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos); } case MIXER_COMMAND_2: if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED) || ((get_b1_facilities (plci, plci->B1_resource) & B1_FACILITY_MIXER) && (add_b1_facilities (plci, plci->B1_resource, (word)(plci->B1_facilities & ~B1_FACILITY_MIXER)) == plci->B1_resource))) { if (!xconnect_write_coefs_process (Id, plci, Rc)) { dbug (1, dprintf ("[%06lx] %s,%d: Write mixer coefs failed", UnMapId (Id), (char *)(FILE_), __LINE__)); if (plci->li_plci_b_write_pos != plci->li_plci_b_req_pos) { do { plci->li_plci_b_write_pos = (plci->li_plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES-1 : plci->li_plci_b_write_pos - 1; i = (plci->li_plci_b_write_pos == 0) ?
LI_PLCI_B_QUEUE_ENTRIES-1 : plci->li_plci_b_write_pos - 1; } while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos) && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG)); } Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; } if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED)) { adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities & ~B1_FACILITY_MIXER), MIXER_COMMAND_3); } case MIXER_COMMAND_3: if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED)) { if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Unload mixer failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; } break; } break; } /* NOTE(review): this dbug uses "%06x" while every sibling uses "%06lx"
 * for UnMapId(Id) - format/argument mismatch worth confirming upstream. */ if ((plci->li_bchannel_id == 0) || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci)) { dbug (1, dprintf ("[%06x] %s,%d: Channel id wiped out %d", UnMapId (Id), (char *)(FILE_), __LINE__, (int)(plci->li_bchannel_id))); } else { i = a->li_base + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = plci->li_channel_bits; if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)) { i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = plci->li_channel_bits; if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) { i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id); li_config_table[i].curchnl = plci->li_channel_bits; } } } }
/*
 * li_update_connect - apply an old-spec (LI_FLAG_*) interconnect request
 * to the flag_table matrix.  Resolves channel indices for party A (this
 * PLCI) and party B (plci_b_id), including the voice (ch_*_v) and slave
 * codec (ch_*_s) aliases used on BRI advanced-voice connections.  On
 * connect it first clears the monitor/announcement/mix flags being
 * re-negotiated, dissolves stale conference memberships, then sets the
 * flags requested in li_flags; on disconnect (connect == 0) it only
 * clears.  Finally the voice and slave-codec aliases of the same party
 * are tied together with LI_FLAG_CONFERENCE.
 */
static void li_update_connect (dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci, dword plci_b_id, byte connect, dword li_flags) { word i, ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s; PLCI *plci_b; DIVA_CAPI_ADAPTER *a_b; a_b = &(adapter[MapController ((byte)(plci_b_id & 0x7f)) - 1]); plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]); ch_a = a->li_base + (plci->li_bchannel_id - 1); if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER)) { ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
/* continuation of li_update_connect(): finish resolving the voice/slave
 * channel aliases, then clear and re-set the LI_FLAG_* matrix entries. */
ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ? a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v; } else { ch_a_v = ch_a; ch_a_s = ch_a; } ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1); if (!a_b->li_pri && (plci_b->tel == ADV_VOICE) && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER)) { ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE; ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ? a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v; } else { ch_b_v = ch_b; ch_b_s = ch_b; } if (connect) { li_config_table[ch_a].flag_table[ch_a_v] &= ~LI_FLAG_MONITOR; li_config_table[ch_a].flag_table[ch_a_s] &= ~LI_FLAG_MONITOR; li_config_table[ch_a_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX); li_config_table[ch_a_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX); } li_config_table[ch_a].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR; li_config_table[ch_a].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR; li_config_table[ch_b_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX); li_config_table[ch_b_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX); if (ch_a_v == ch_b_v) { li_config_table[ch_a_v].flag_table[ch_b_v] &= ~LI_FLAG_CONFERENCE; li_config_table[ch_a_s].flag_table[ch_b_s] &= ~LI_FLAG_CONFERENCE; } else { /* leaving a conference: drop the whole row/column membership, not
 * just the single pairing */ if (li_config_table[ch_a_v].flag_table[ch_b_v] & LI_FLAG_CONFERENCE) { for (i = 0; i < li_total_channels; i++) { if (i != ch_a_v) li_config_table[ch_a_v].flag_table[i] &= ~LI_FLAG_CONFERENCE; } } if (li_config_table[ch_a_s].flag_table[ch_b_v] & LI_FLAG_CONFERENCE) { for (i = 0; i < li_total_channels; i++) { if (i != ch_a_s) li_config_table[ch_a_s].flag_table[i] &= ~LI_FLAG_CONFERENCE; } } if (li_config_table[ch_b_v].flag_table[ch_a_v] & LI_FLAG_CONFERENCE) { for (i = 0; i < li_total_channels; i++) { if (i != ch_a_v) li_config_table[i].flag_table[ch_a_v] &= ~LI_FLAG_CONFERENCE; } } if (li_config_table[ch_b_v].flag_table[ch_a_s] &
LI_FLAG_CONFERENCE) { for (i = 0; i < li_total_channels; i++) { if (i != ch_a_s) li_config_table[i].flag_table[ch_a_s] &= ~LI_FLAG_CONFERENCE; } } } /* now set the newly requested flags from li_flags */ if (li_flags & LI_FLAG_CONFERENCE_A_B) { li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE; li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE; li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE; li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE; } if (li_flags & LI_FLAG_CONFERENCE_B_A) { li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE; li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE; li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE; li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE; } if (li_flags & LI_FLAG_MONITOR_A) { li_config_table[ch_a].flag_table[ch_a_v] |= LI_FLAG_MONITOR; li_config_table[ch_a].flag_table[ch_a_s] |= LI_FLAG_MONITOR; } if (li_flags & LI_FLAG_MONITOR_B) { li_config_table[ch_a].flag_table[ch_b_v] |= LI_FLAG_MONITOR; li_config_table[ch_a].flag_table[ch_b_s] |= LI_FLAG_MONITOR; } if (li_flags & LI_FLAG_ANNOUNCEMENT_A) { li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT; li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT; } if (li_flags & LI_FLAG_ANNOUNCEMENT_B) { li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT; li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT; } if (li_flags & LI_FLAG_MIX_A) { li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_MIX; li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_MIX; } if (li_flags & LI_FLAG_MIX_B) { li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_MIX; li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_MIX; } if (ch_a_v != ch_a_s) { li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE; li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE; } if (ch_b_v != ch_b_s) { li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE; } }
/*
 * li2_update_connect - new-spec (LI2_FLAG_*) counterpart of
 * li_update_connect.  Same channel-alias resolution for parties A and B;
 * on connect it clears party B's monitor/mix/loop state and both parties'
 * interconnect/conference pairings, then sets the interconnect, monitor,
 * mix, loop and PC-connect flags requested in li_flags, again tying each
 * party's voice and slave-codec aliases together with LI_FLAG_CONFERENCE.
 */
static void li2_update_connect (dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci, dword plci_b_id, byte connect, dword li_flags) { word ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s; PLCI *plci_b; DIVA_CAPI_ADAPTER *a_b; a_b = &(adapter[MapController ((byte)(plci_b_id & 0x7f)) - 1]); plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]); ch_a = a->li_base + (plci->li_bchannel_id - 1); if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER)) { ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE; ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ? a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v; } else { ch_a_v = ch_a; ch_a_s = ch_a; } ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1); if (!a_b->li_pri && (plci_b->tel == ADV_VOICE) && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER)) { ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE; ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
/* continuation of li2_update_connect(): clear old state, then apply the
 * LI2_FLAG_* bits from li_flags to the flag_table / chflags. */
a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v; } else { ch_b_v = ch_b; ch_b_s = ch_b; } if (connect) { li_config_table[ch_b].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR; li_config_table[ch_b].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR; li_config_table[ch_b_v].flag_table[ch_b] &= ~LI_FLAG_MIX; li_config_table[ch_b_s].flag_table[ch_b] &= ~LI_FLAG_MIX; li_config_table[ch_b].flag_table[ch_b] &= ~LI_FLAG_PCCONNECT; li_config_table[ch_b].chflags &= ~(LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP); } li_config_table[ch_b_v].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_b_s].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_b_v].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_b_s].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_a_v].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_a_v].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_a_s].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); li_config_table[ch_a_s].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE); if (li_flags & LI2_FLAG_INTERCONNECT_A_B) { li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT; } if (li_flags & LI2_FLAG_INTERCONNECT_B_A) { li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT; li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT; } if (li_flags & LI2_FLAG_MONITOR_B) { li_config_table[ch_b].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
li_config_table[ch_b].flag_table[ch_b_s] |= LI_FLAG_MONITOR; } if (li_flags & LI2_FLAG_MIX_B) { li_config_table[ch_b_v].flag_table[ch_b] |= LI_FLAG_MIX; li_config_table[ch_b_s].flag_table[ch_b] |= LI_FLAG_MIX; } if (li_flags & LI2_FLAG_MONITOR_X) li_config_table[ch_b].chflags |= LI_CHFLAG_MONITOR; if (li_flags & LI2_FLAG_MIX_X) li_config_table[ch_b].chflags |= LI_CHFLAG_MIX; if (li_flags & LI2_FLAG_LOOP_B) { li_config_table[ch_b_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT; li_config_table[ch_b_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT; } if (li_flags & LI2_FLAG_LOOP_PC) li_config_table[ch_b].flag_table[ch_b] |= LI_FLAG_PCCONNECT; if (li_flags & LI2_FLAG_LOOP_X) li_config_table[ch_b].chflags |= LI_CHFLAG_LOOP; if (li_flags & LI2_FLAG_PCCONNECT_A_B) li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_PCCONNECT; if (li_flags & LI2_FLAG_PCCONNECT_B_A) li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_PCCONNECT; if (ch_a_v != ch_a_s) { li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE; li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE; } if (ch_b_v != ch_b_s) { li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE; li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE; } }
/*
 * li_check_main_plci - validate the PLCI that issued a line-interconnect
 * request: non-NULL, in a usable state (State set, NL id present, not
 * being removed) and with a B channel assigned.  On success it claims the
 * channel's li_config_table slot for this PLCI and returns GOOD;
 * otherwise returns _WRONG_IDENTIFIER or _WRONG_STATE.
 */
static word li_check_main_plci (dword Id, PLCI *plci) { if (plci == NULL) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong PLCI", UnMapId (Id), (char *)(FILE_), __LINE__)); return (_WRONG_IDENTIFIER); } if (!plci->State || !plci->NL.Id || plci->nl_remove_id || (plci->li_bchannel_id == 0)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong state", UnMapId (Id), (char *)(FILE_), __LINE__)); return (_WRONG_STATE); } li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = plci; return (GOOD); }
/*
 * li_check_plci_b - old-spec validation of the second ("B") party of an
 * interconnect request.  Checks: room for two more entries in the
 * li_plci_b queue, a valid controller/PLCI number in plci_b_id, a usable
 * peer state, both adapters xconnect-capable when the peer lives on a
 * different controller, and that the peer's B1 resource can carry the
 * mixer facility.  On failure writes a CAPI error word through p_result
 * and returns NULL; on success claims the peer's li_config_table slot and
 * returns the peer PLCI.
 */
static PLCI *li_check_plci_b (dword Id, PLCI *plci, dword plci_b_id, word plci_b_write_pos, byte *p_result) {
/* continuation of li_check_plci_b(): reject peers whose B1 resource
 * cannot provide the mixer facility. */
__LINE__, plci_b->B1_resource)); PUT_WORD (p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE); return (NULL); } return (plci_b); }
/*
 * li2_check_plci_b - new-spec counterpart of li_check_plci_b.  Same
 * validation chain (queue space, controller/PLCI number, peer state,
 * cross-controller xconnect capability, peer mixer capability) but with
 * new-spec error codes (_WRONG_STATE instead of
 * _REQUEST_NOT_ALLOWED_IN_THIS_STATE) and it additionally requires that
 * the peer already owns its li_config_table slot rather than claiming it.
 * Writes the error word through p_result and returns NULL on failure,
 * or the peer PLCI on success.
 */
static PLCI *li2_check_plci_b (dword Id, PLCI *plci, dword plci_b_id, word plci_b_write_pos, byte *p_result) { byte ctlr_b; PLCI *plci_b; if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos : LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2) { dbug (1, dprintf ("[%06lx] %s,%d: LI request overrun", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (p_result, _WRONG_STATE); return (NULL); } ctlr_b = 0; if ((plci_b_id & 0x7f) != 0) { ctlr_b = MapController ((byte)(plci_b_id & 0x7f)); if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL))) ctlr_b = 0; } if ((ctlr_b == 0) || (((plci_b_id >> 8) & 0xff) == 0) || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci)) { dbug (1, dprintf ("[%06lx] %s,%d: LI invalid second PLCI %08lx", UnMapId (Id), (char *)(FILE_), __LINE__, plci_b_id)); PUT_WORD (p_result, _WRONG_IDENTIFIER); return (NULL); } plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]); if (!plci_b->State || !plci_b->NL.Id || plci_b->nl_remove_id || (plci_b->li_bchannel_id == 0) || (li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci != plci_b)) { dbug (1, dprintf ("[%06lx] %s,%d: LI peer in wrong state %08lx", UnMapId (Id), (char *)(FILE_), __LINE__, plci_b_id)); PUT_WORD (p_result, _WRONG_STATE); return (NULL); } if (((byte)(plci_b_id & ~EXT_CONTROLLER)) != ((byte)(UnMapController (plci->adapter->Id) & ~EXT_CONTROLLER)) && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) || !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT))) { dbug (1, dprintf ("[%06lx] %s,%d: LI not on same ctrl %08lx", UnMapId (Id), (char *)(FILE_), __LINE__, plci_b_id)); PUT_WORD (p_result, _WRONG_IDENTIFIER); return (NULL); } if (!(get_b1_facilities (plci_b, add_b1_facilities (plci_b,
plci_b->B1_resource, (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER)) { dbug (1, dprintf ("[%06lx] %s,%d: Interconnect peer cannot mix %d", UnMapId (Id), (char *)(FILE_), __LINE__, plci_b->B1_resource)); PUT_WORD (p_result, _WRONG_STATE); return (NULL); } return (plci_b); }
/*
 * mixer_request - CAPI FACILITY_REQ handler for
 * SELECTOR_LINE_INTERCONNECT.  Parses the facility parameters, then
 * dispatches on the LI function word: LI_GET_SUPPORTED_SERVICES reports
 * the supported flags and channel counts (old- or new-spec layout
 * depending on APPL_FLAG_OLD_LI_SPEC), LI_REQ_CONNECT / LI_REQ_DISCONNECT
 * validate the parties, update the flag tables and queue the B-party ids
 * in plci->li_plci_b_queue, then start mixer_command() as the internal
 * command.  A request with an 8-byte (connect) / 4-byte (disconnect)
 * parameter struct selects the old LI spec for this application.
 */
static byte mixer_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg) { word Info; word i; dword d, li_flags, plci_b_id; PLCI *plci_b; API_PARSE li_parms[3]; API_PARSE li_req_parms[3]; API_PARSE li_participant_struct[2]; API_PARSE li_participant_parms[3]; word participant_parms_pos; byte result_buffer[32]; byte *result; word result_pos; word plci_b_write_pos; dbug (1, dprintf ("[%06lx] %s,%d: mixer_request", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = GOOD; result = result_buffer; result_buffer[0] = 0; if (!(a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)) { dbug (1, dprintf ("[%06lx] %s,%d: Facility not supported", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; } else if (api_parse (&msg[1].info[1], msg[1].length, "ws", li_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; } else { result_buffer[0] = 3; PUT_WORD (&result_buffer[1], GET_WORD (li_parms[0].info)); result_buffer[3] = 0; switch (GET_WORD (li_parms[0].info)) { case LI_GET_SUPPORTED_SERVICES: if (appl->appl_flags & APPL_FLAG_OLD_LI_SPEC) { result_buffer[0] = 17; result_buffer[3] = 14; PUT_WORD (&result_buffer[4], GOOD); d = 0; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_CH) d |= LI_CONFERENCING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC) d |= LI_MONITORING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH) d |= LI_ANNOUNCEMENTS_SUPPORTED | LI_MIXING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) d |= LI_CROSS_CONTROLLER_SUPPORTED;
PUT_DWORD (&result_buffer[6], d); if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) { d = 0; for (i = 0; i < li_total_channels; i++) { if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) && (li_config_table[i].adapter->li_pri || (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI))) { d++; } } } else { d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI; } PUT_DWORD (&result_buffer[10], d / 2); PUT_DWORD (&result_buffer[14], d); } else { result_buffer[0] = 25; result_buffer[3] = 22; PUT_WORD (&result_buffer[4], GOOD); d = LI2_ASYMMETRIC_SUPPORTED | LI2_B_LOOPING_SUPPORTED | LI2_X_LOOPING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC) d |= LI2_MONITORING_SUPPORTED | LI2_REMOTE_MONITORING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH) d |= LI2_MIXING_SUPPORTED | LI2_REMOTE_MIXING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_PC) d |= LI2_PC_LOOPING_SUPPORTED; if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) d |= LI2_CROSS_CONTROLLER_SUPPORTED; PUT_DWORD (&result_buffer[6], d); d = a->li_pri ?
/* continuation of mixer_request(): finish LI_GET_SUPPORTED_SERVICES
 * (new-spec layout), then handle LI_REQ_CONNECT / LI_REQ_DISCONNECT. */
a->li_channels : MIXER_BCHANNELS_BRI; PUT_DWORD (&result_buffer[10], d / 2); PUT_DWORD (&result_buffer[14], d - 1); if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) { d = 0; for (i = 0; i < li_total_channels; i++) { if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT) && (li_config_table[i].adapter->li_pri || (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI))) { d++; } } } PUT_DWORD (&result_buffer[18], d / 2); PUT_DWORD (&result_buffer[22], d - 1); } break;
/* LI_REQ_CONNECT: an 8-byte parameter struct means the old LI spec
 * (single peer); otherwise new spec with a participant list. */
case LI_REQ_CONNECT: if (li_parms[1].length == 8) { appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC; if (api_parse (&li_parms[1].info[1], li_parms[1].length, "dd", li_req_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; break; } plci_b_id = GET_DWORD (li_req_parms[0].info) & 0xffff; li_flags = GET_DWORD (li_req_parms[1].info); Info = li_check_main_plci (Id, plci); result_buffer[0] = 9; result_buffer[3] = 6; PUT_DWORD (&result_buffer[4], plci_b_id); PUT_WORD (&result_buffer[8], GOOD); if (Info != GOOD) break; result = plci->saved_msg.info; for (i = 0; i <= result_buffer[0]; i++) result[i] = result_buffer[i]; plci_b_write_pos = plci->li_plci_b_write_pos; plci_b = li_check_plci_b (Id, plci, plci_b_id, plci_b_write_pos, &result[8]); if (plci_b == NULL) break; li_update_connect (Id, a, plci, plci_b_id, true, li_flags); plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_LAST_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ?
0 : plci_b_write_pos + 1; plci->li_plci_b_write_pos = plci_b_write_pos; } else { appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC; if (api_parse (&li_parms[1].info[1], li_parms[1].length, "ds", li_req_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; break; } li_flags = GET_DWORD (li_req_parms[0].info) & ~(LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A); Info = li_check_main_plci (Id, plci); result_buffer[0] = 7; result_buffer[3] = 4; PUT_WORD (&result_buffer[4], Info); result_buffer[6] = 0; if (Info != GOOD) break; result = plci->saved_msg.info; for (i = 0; i <= result_buffer[0]; i++) result[i] = result_buffer[i]; plci_b_write_pos = plci->li_plci_b_write_pos; participant_parms_pos = 0; result_pos = 7; li2_update_connect (Id, a, plci, UnMapId (Id), true, li_flags); while (participant_parms_pos < li_req_parms[1].length) { result[result_pos] = 6; result_pos += 7; PUT_DWORD (&result[result_pos - 6], 0); PUT_WORD (&result[result_pos - 2], GOOD); if (api_parse (&li_req_parms[1].info[1 + participant_parms_pos], (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_MESSAGE_FORMAT); break; } if (api_parse (&li_participant_struct[0].info[1], li_participant_struct[0].length, "dd", li_participant_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_MESSAGE_FORMAT); break; } plci_b_id = GET_DWORD (li_participant_parms[0].info) & 0xffff; li_flags = GET_DWORD (li_participant_parms[1].info); PUT_DWORD (&result[result_pos - 6], plci_b_id); /* NOTE(review): result is a byte*, so sizeof(result) is the pointer
 * size, not the buffer capacity - this overrun guard looks wrong;
 * confirm against the upstream driver. */ if (sizeof(result) - result_pos < 7) { dbug (1, dprintf ("[%06lx] %s,%d: LI result overrun", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_STATE);
break; } plci_b = li2_check_plci_b (Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]); if (plci_b != NULL) { li2_update_connect (Id, a, plci, plci_b_id, true, li_flags); plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | ((li_flags & (LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A | LI2_FLAG_PCCONNECT_A_B | LI2_FLAG_PCCONNECT_B_A)) ? 0 : LI_PLCI_B_DISC_FLAG); plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ? 0 : plci_b_write_pos + 1; } participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) - (&li_req_parms[1].info[1])); } result[0] = (byte)(result_pos - 1); result[3] = (byte)(result_pos - 4); result[6] = (byte)(result_pos - 7); i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES-1 : plci_b_write_pos - 1; if ((plci_b_write_pos == plci->li_plci_b_read_pos) || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG)) { plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ?
0 : plci_b_write_pos + 1; } else plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG; plci->li_plci_b_write_pos = plci_b_write_pos; } mixer_calculate_coefs (a); plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel; mixer_notify_update (plci, true); sendf (appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number, "wwS", Info, SELECTOR_LINE_INTERCONNECT, result); plci->command = 0; plci->li_cmd = GET_WORD (li_parms[0].info); start_internal_command (Id, plci, mixer_command); return (false);
/* LI_REQ_DISCONNECT: 4-byte parameter struct selects the old LI spec. */
case LI_REQ_DISCONNECT: if (li_parms[1].length == 4) { appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC; if (api_parse (&li_parms[1].info[1], li_parms[1].length, "d", li_req_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; break; } plci_b_id = GET_DWORD (li_req_parms[0].info) & 0xffff; Info = li_check_main_plci (Id, plci); result_buffer[0] = 9; result_buffer[3] = 6; PUT_DWORD (&result_buffer[4], GET_DWORD (li_req_parms[0].info)); PUT_WORD (&result_buffer[8], GOOD); if (Info != GOOD) break; result = plci->saved_msg.info; for (i = 0; i <= result_buffer[0]; i++) result[i] = result_buffer[i]; plci_b_write_pos = plci->li_plci_b_write_pos; plci_b = li_check_plci_b (Id, plci, plci_b_id, plci_b_write_pos, &result[8]); if (plci_b == NULL) break; li_update_connect (Id, a, plci, plci_b_id, false, 0); plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_LAST_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ?
0 : plci_b_write_pos + 1; plci->li_plci_b_write_pos = plci_b_write_pos; } else { appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC; if (api_parse (&li_parms[1].info[1], li_parms[1].length, "s", li_req_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; break; } Info = li_check_main_plci (Id, plci); result_buffer[0] = 7; result_buffer[3] = 4; PUT_WORD (&result_buffer[4], Info); result_buffer[6] = 0; if (Info != GOOD) break; result = plci->saved_msg.info; for (i = 0; i <= result_buffer[0]; i++) result[i] = result_buffer[i]; plci_b_write_pos = plci->li_plci_b_write_pos; participant_parms_pos = 0; result_pos = 7; while (participant_parms_pos < li_req_parms[0].length) { result[result_pos] = 6; result_pos += 7; PUT_DWORD (&result[result_pos - 6], 0); PUT_WORD (&result[result_pos - 2], GOOD); if (api_parse (&li_req_parms[0].info[1 + participant_parms_pos], (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_MESSAGE_FORMAT); break; } if (api_parse (&li_participant_struct[0].info[1], li_participant_struct[0].length, "d", li_participant_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_MESSAGE_FORMAT); break; } plci_b_id = GET_DWORD (li_participant_parms[0].info) & 0xffff; PUT_DWORD (&result[result_pos - 6], plci_b_id); if (sizeof(result) - result_pos < 7) { dbug (1, dprintf ("[%06lx] %s,%d: LI result overrun", UnMapId (Id), (char *)(FILE_), __LINE__)); PUT_WORD (&result[result_pos - 2], _WRONG_STATE); break; } plci_b = li2_check_plci_b (Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]); if (plci_b != NULL) { li2_update_connect (Id, a, plci, plci_b_id, false, 0); plci->li_plci_b_queue[plci_b_write_pos] = 
plci_b_id | LI_PLCI_B_DISC_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ? 0 : plci_b_write_pos + 1; } participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) - (&li_req_parms[0].info[1])); } result[0] = (byte)(result_pos - 1); result[3] = (byte)(result_pos - 4); result[6] = (byte)(result_pos - 7); i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES-1 : plci_b_write_pos - 1; if ((plci_b_write_pos == plci->li_plci_b_read_pos) || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG)) { plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ? 0 : plci_b_write_pos + 1; } else plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG; plci->li_plci_b_write_pos = plci_b_write_pos; } mixer_calculate_coefs (a); plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel; mixer_notify_update (plci, true); sendf (appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number, "wwS", Info, SELECTOR_LINE_INTERCONNECT, result); plci->command = 0; plci->li_cmd = GET_WORD (li_parms[0].info); start_internal_command (Id, plci, mixer_command); return (false); case LI_REQ_SILENT_UPDATE: if (!plci || !plci->State || !plci->NL.Id || plci->nl_remove_id || (plci->li_bchannel_id == 0) || (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong state", UnMapId (Id), (char *)(FILE_), __LINE__)); return (false); } plci_b_write_pos = plci->li_plci_b_write_pos; if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos : LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2) { dbug (1, dprintf ("[%06lx] %s,%d: LI request overrun", UnMapId (Id), (char *)(FILE_), __LINE__)); return (false); } i = (plci_b_write_pos == 0) ? 
LI_PLCI_B_QUEUE_ENTRIES-1 : plci_b_write_pos - 1; if ((plci_b_write_pos == plci->li_plci_b_read_pos) || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG)) { plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG; plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ? 0 : plci_b_write_pos + 1; } else plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG; plci->li_plci_b_write_pos = plci_b_write_pos; plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel; plci->command = 0; plci->li_cmd = GET_WORD (li_parms[0].info); start_internal_command (Id, plci, mixer_command); return (false); default: dbug (1, dprintf ("[%06lx] %s,%d: LI unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, GET_WORD (li_parms[0].info))); Info = _FACILITY_NOT_SUPPORTED; } } sendf (appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number, "wwS", Info, SELECTOR_LINE_INTERCONNECT, result); return (false); } static void mixer_indication_coefs_set (dword Id, PLCI *plci) { dword d; DIVA_CAPI_ADAPTER *a; byte result[12]; dbug (1, dprintf ("[%06lx] %s,%d: mixer_indication_coefs_set", UnMapId (Id), (char *)(FILE_), __LINE__)); a = plci->adapter; if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos) { do { d = plci->li_plci_b_queue[plci->li_plci_b_read_pos]; if (!(d & LI_PLCI_B_SKIP_FLAG)) { if (plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC) { if (d & LI_PLCI_B_DISC_FLAG) { result[0] = 5; PUT_WORD (&result[1], LI_IND_DISCONNECT); result[3] = 2; PUT_WORD (&result[4], _LI_USER_INITIATED); } else { result[0] = 7; PUT_WORD (&result[1], LI_IND_CONNECT_ACTIVE); result[3] = 4; PUT_DWORD (&result[4], d & ~LI_PLCI_B_FLAG_MASK); } } else { if (d & LI_PLCI_B_DISC_FLAG) { result[0] = 9; PUT_WORD (&result[1], LI_IND_DISCONNECT); result[3] = 6; PUT_DWORD (&result[4], d & ~LI_PLCI_B_FLAG_MASK); PUT_WORD (&result[8], _LI_USER_INITIATED); } else { result[0] = 7; PUT_WORD (&result[1], LI_IND_CONNECT_ACTIVE); result[3] = 4; PUT_DWORD (&result[4], d 
/* --- tail of mixer_indication_coefs_set() (definition begins on an earlier
 *     line): finish the queued LI indication, deliver it to the application
 *     and advance the circular read index of the li_plci_b queue. --- */
            & ~LI_PLCI_B_FLAG_MASK);
        }
      }
      /* Deliver the assembled indication for the line-interconnect selector. */
      sendf (plci->appl, _FACILITY_I, Id & 0xffffL, 0,
        "ws", SELECTOR_LINE_INTERCONNECT, result);
    }
    /* Advance read index, wrapping at LI_PLCI_B_QUEUE_ENTRIES. */
    plci->li_plci_b_read_pos = (plci->li_plci_b_read_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ?
      0 : plci->li_plci_b_read_pos + 1;
  } while (!(d & LI_PLCI_B_LAST_FLAG)
    && (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos));
  }
}


/* Handle an XCONNECT_FROM indication from the card: the payload carries one
 * or more 16-byte records (starting at msg[1]), each describing the transfer
 * address of a B- or PC-side channel.  Record layout as parsed below:
 *   bytes 0..3  card_address.low   (little-endian dword)
 *   bytes 4..7  card_address.high  (little-endian dword)
 *   bytes 8..11 offset             (little-endian dword)
 *   bytes 12..13 channel word: number in XCONNECT_CHANNEL_NUMBER_MASK,
 *                XCONNECT_CHANNEL_PORT_PC selects the PC-side slot.
 * (Bytes 14..15 of each record are not read here — presumably padding;
 *  TODO confirm against the card interface definition.)
 * The addresses are stored in li_config_table[] and the channel is marked
 * LI_CHANNEL_ADDRESSES_SET; a pending adjust_b restore command, if any,
 * is resumed afterwards. */
static void mixer_indication_xconnect_from (dword Id, PLCI *plci, byte *msg, word length)
{
  word i, j, ch;
  struct xconnect_transfer_address_s s, *p;
  DIVA_CAPI_ADAPTER *a;

  dbug (1, dprintf ("[%06lx] %s,%d: mixer_indication_xconnect_from %d",
    UnMapId (Id), (char *)(FILE_), __LINE__, (int) length));
  a = plci->adapter;
  i = 1;  /* NOTE(review): redundant — the for statement re-initializes i */
  for (i = 1; i < length; i += 16)
  {
    /* Assemble the little-endian dwords byte by byte. */
    s.card_address.low =
      msg[i] | (msg[i+1] << 8) | (((dword)(msg[i+2])) << 16) | (((dword)(msg[i+3])) << 24);
    s.card_address.high =
      msg[i+4] | (msg[i+5] << 8) | (((dword)(msg[i+6])) << 16) | (((dword)(msg[i+7])) << 24);
    s.offset =
      msg[i+8] | (msg[i+9] << 8) | (((dword)(msg[i+10])) << 16) | (((dword)(msg[i+11])) << 24);
    ch = msg[i+12] | (msg[i+13] << 8);
    j = ch & XCONNECT_CHANNEL_NUMBER_MASK;
    /* On BRI the two B-channels are swapped when this PLCI owns channel 2. */
    if (!a->li_pri && (plci->li_bchannel_id == 2))
      j = 1 - j;
    j += a->li_base;
    if (ch & XCONNECT_CHANNEL_PORT_PC)
      p = &(li_config_table[j].send_pc);
    else
      p = &(li_config_table[j].send_b);
    p->card_address.low = s.card_address.low;
    p->card_address.high = s.card_address.high;
    p->offset = s.offset;
    li_config_table[j].channel |= LI_CHANNEL_ADDRESSES_SET;
  }
  /* If an internal adjust_b restore sequence is waiting for these addresses,
   * re-enter it now. */
  if (plci->internal_command_queue[0]
    && ((plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
      || (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
      || (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)))
  {
    (*(plci->internal_command_queue[0]))(Id, plci, 0);
    if (!plci->internal_command)
      next_internal_command (Id, plci);
  }
  mixer_notify_update (plci, true);
}


/* Handle an XCONNECT_TO indication — trace only, the payload is ignored
 * (body continues on the next source line). */
static void mixer_indication_xconnect_to (dword Id, PLCI *plci, byte *msg, word length)
{
  dbug (1, dprintf ("[%06lx] %s,%d: mixer_indication_xconnect_to %d",
    UnMapId (Id), (char *)(FILE_), __LINE__,
    (int) length));  /* tail of mixer_indication_xconnect_to(): trace only */
}


/* Queue a disconnect ("source removed") indication for plci_b_id into this
 * PLCI's circular li_plci_b queue.
 * Returns true on success, false when the queue has no free slot. */
static byte mixer_notify_source_removed (PLCI *plci, dword plci_b_id)
{
  word plci_b_write_pos;

  plci_b_write_pos = plci->li_plci_b_write_pos;
  /* Free-slot check: distance from write to read index, modulo
   * LI_PLCI_B_QUEUE_ENTRIES, minus one reserved slot, must be >= 1. */
  if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
    LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 1)
  {
    dbug (1, dprintf ("[%06lx] %s,%d: LI request overrun",
      (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
      (char *)(FILE_), __LINE__));
    return (false);
  }
  plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
  /* Advance write index with wrap-around. */
  plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES-1) ?
    0 : plci_b_write_pos + 1;
  plci->li_plci_b_write_pos = plci_b_write_pos;
  return (true);
}


/* Tear down a PLCI's line-interconnect state: notify every still-active
 * peer PLCI interconnected with this channel (new-spec applications only),
 * clear the mixer configuration, recompute coefficients and release the
 * li_config_table slot.  (Body continues on the next source line.) */
static void mixer_remove (PLCI *plci)
{
  DIVA_CAPI_ADAPTER *a;
  PLCI *notify_plci;
  dword plci_b_id;
  word i, j;

  dbug (1, dprintf ("[%06lx] %s,%d: mixer_remove",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  a = plci->adapter;
  plci_b_id = (plci->Id << 8) | UnMapController (plci->adapter->Id);
  if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
  {
    if ((plci->li_bchannel_id != 0)
      && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
    {
      i = a->li_base + (plci->li_bchannel_id - 1);
      if ((li_config_table[i].curchnl | li_config_table[i].channel) & LI_CHANNEL_INVOLVED)
      {
        /* Notify each interconnected peer that this source disappears. */
        for (j = 0; j < li_total_channels; j++)
        {
          if ((li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
            || (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT))
          {
            notify_plci = li_config_table[j].plci;
            /* Only new-spec applications with a live network connection
             * receive the removal indication. */
            if ((notify_plci != NULL)
              && (notify_plci != plci)
              && (notify_plci->appl != NULL)
              && !(notify_plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
              && (notify_plci->State)
              && notify_plci->NL.Id && !notify_plci->nl_remove_id)
            {
              mixer_notify_source_removed (notify_plci, plci_b_id);
            }
          }
        }
        mixer_clear_config (plci);
        mixer_calculate_coefs (a);
        mixer_notify_update (plci, true);
      }
      li_config_table[i].plci = NULL;
      plci->li_bchannel_id =
        0;  /* tail of mixer_remove(): release the B-channel binding */
    }
  }
}


/*------------------------------------------------------------------*/
/* Echo canceller facilities                                        */
/*------------------------------------------------------------------*/


/* Send the current LEC (line echo canceller) options and tail length to the
 * DSP via a TEL_CTRL signalling request (FTY parameter, 6-byte buffer).
 * A tail length of 0 is sent as the default of 128. */
static void ec_write_parameters (PLCI *plci)
{
  word w;
  byte parameter_buffer[6];

  dbug (1, dprintf ("[%06lx] %s,%d: ec_write_parameters",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  parameter_buffer[0] = 5;  /* structure length */
  parameter_buffer[1] = DSP_CTRL_SET_LEC_PARAMETERS;
  PUT_WORD (&parameter_buffer[2], plci->ec_idi_options);
  /* RESET_COEFFICIENTS is a one-shot flag: clear it once transmitted. */
  plci->ec_idi_options &= ~LEC_RESET_COEFFICIENTS;
  w = (plci->ec_tail_length == 0) ? 128 : plci->ec_tail_length;
  PUT_WORD (&parameter_buffer[4], w);
  add_p (plci, FTY, parameter_buffer);
  sig_req (plci, TEL_CTRL, 0);
  send_req (plci);
}


/* Reset the PLCI's echo-canceller state to its idle defaults
 * (canceller present but manually disabled, non-linear processing on). */
static void ec_clear_config (PLCI *plci)
{
  dbug (1, dprintf ("[%06lx] %s,%d: ec_clear_config",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER | LEC_MANUAL_DISABLE |
    LEC_ENABLE_NONLINEAR_PROCESSING;
  plci->ec_tail_length = 0;
}


/* adjust_b hook: nothing to prepare before a B-channel switch (trace only). */
static void ec_prepare_switch (dword Id, PLCI *plci)
{
  dbug (1, dprintf ("[%06lx] %s,%d: ec_prepare_switch",
    UnMapId (Id), (char *)(FILE_), __LINE__));
}


/* adjust_b hook: EC state lives in the PLCI already, nothing to save. */
static word ec_save_config (dword Id, PLCI *plci, byte Rc)
{
  dbug (1, dprintf ("[%06lx] %s,%d: ec_save_config %02x %d",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
  return (GOOD);
}


/* adjust_b hook: rewrite the EC parameters after a B-channel switch.
 * State machine: EC_1 issues ec_write_parameters (retrying while a
 * signalling request is pending), EC_2 checks its completion code.
 * (Body continues on the next source line.) */
static word ec_restore_config (dword Id, PLCI *plci, byte Rc)
{
  word Info;

  dbug (1, dprintf ("[%06lx] %s,%d: ec_restore_config %02x %d",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
  Info = GOOD;
  if (plci->B1_facilities & B1_FACILITY_EC)
  {
    switch (plci->adjust_b_state)
    {
    case ADJUST_B_RESTORE_EC_1:
      plci->internal_command = plci->adjust_b_command;
      if (plci->sig_req)
      {
        /* Signalling channel busy: stay in this state and retry later. */
        plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
        break;
      }
      ec_write_parameters (plci);
      plci->adjust_b_state = ADJUST_B_RESTORE_EC_2;
      break;
    case
    /* tail of ec_restore_config(): check completion of the parameter write */
    ADJUST_B_RESTORE_EC_2:
      if ((Rc != OK) && (Rc != OK_FC))
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Restore EC failed %02x",
          UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
        Info = _WRONG_STATE;
        break;
      }
      break;
    }
  }
  return (Info);
}


/* Internal-command engine for echo-canceller facility requests.  Driven
 * repeatedly with the result code (Rc) of the previous step; returns early
 * (without confirming) while more steps are pending and sends the facility
 * confirmation once the sequence finishes or fails.  The case labels fall
 * through deliberately so a fresh command runs the first applicable step
 * immediately. */
static void ec_command (dword Id, PLCI *plci, byte Rc)
{
  word internal_command, Info;
  byte result[8];

  dbug (1, dprintf ("[%06lx] %s,%d: ec_command %02x %04x %04x %04x %d",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
    plci->ec_cmd, plci->ec_idi_options, plci->ec_tail_length));
  Info = GOOD;
  /* Pre-build the success confirmation; private-spec applications get a
   * bare EC_SUCCESS word, standard ones a function/result structure. */
  if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
  {
    result[0] = 2;
    PUT_WORD (&result[1], EC_SUCCESS);
  }
  else
  {
    result[0] = 5;
    PUT_WORD (&result[1], plci->ec_cmd);
    result[3] = 2;
    PUT_WORD (&result[4], GOOD);
  }
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (plci->ec_cmd)
  {
  case EC_ENABLE_OPERATION:
  case EC_FREEZE_COEFFICIENTS:
  case EC_RESUME_COEFFICIENT_UPDATE:
  case EC_RESET_COEFFICIENTS:
    switch (internal_command)
    {
    default:
      /* Step 0: request the EC-capable B1 resource. */
      adjust_b1_resource (Id, plci, NULL,
        (word)(plci->B1_facilities | B1_FACILITY_EC), EC_COMMAND_1);
      /* fall through */
    case EC_COMMAND_1:
      if (adjust_b_process (Id, plci, Rc) != GOOD)
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Load EC failed",
          UnMapId (Id), (char *)(FILE_), __LINE__));
        Info = _FACILITY_NOT_SUPPORTED;
        break;
      }
      if (plci->internal_command)
        return;  /* resource adjustment still in progress */
      /* fall through */
    case EC_COMMAND_2:
      if (plci->sig_req)
      {
        /* Signalling channel busy: re-enter here on the next callback. */
        plci->internal_command = EC_COMMAND_2;
        return;
      }
      plci->internal_command = EC_COMMAND_3;
      ec_write_parameters (plci);
      return;
    case EC_COMMAND_3:
      if ((Rc != OK) && (Rc != OK_FC))
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Enable EC failed %02x",
          UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
        Info = _FACILITY_NOT_SUPPORTED;
        break;
      }
      break;
    }
    break;
  case EC_DISABLE_OPERATION:
    switch (internal_command)
    {
    default:
    case EC_COMMAND_1:
      if (plci->B1_facilities & B1_FACILITY_EC)
      {
        if (plci->sig_req)
        {
          plci->internal_command = EC_COMMAND_1;
          return;
        }
        plci->internal_command = EC_COMMAND_2;
        ec_write_parameters (plci);
        return;
      }
      /* EC was not loaded: nothing to switch off, treat as success. */
      Rc = OK;
      /* fall through -- sequence continues on the next source line */
    case
EC_COMMAND_2: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Disable EC failed %02x", UnMapId (Id), (char *)(FILE_), __LINE__, Rc)); Info = _FACILITY_NOT_SUPPORTED; break; } adjust_b1_resource (Id, plci, NULL, (word)(plci->B1_facilities & ~B1_FACILITY_EC), EC_COMMAND_3); case EC_COMMAND_3: if (adjust_b_process (Id, plci, Rc) != GOOD) { dbug (1, dprintf ("[%06lx] %s,%d: Unload EC failed", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; break; } if (plci->internal_command) return; break; } break; } sendf (plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number, "wws", Info, (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ? PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result); } static byte ec_request (dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg) { word Info; word opt; API_PARSE ec_parms[3]; byte result[16]; dbug (1, dprintf ("[%06lx] %s,%d: ec_request", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = GOOD; result[0] = 0; if (!(a->man_profile.private_options & (1L << PRIVATE_ECHO_CANCELLER))) { dbug (1, dprintf ("[%06lx] %s,%d: Facility not supported", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _FACILITY_NOT_SUPPORTED; } else { if (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) { if (api_parse (&msg[1].info[1], msg[1].length, "w", ec_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; } else { if (plci == NULL) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong PLCI", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_IDENTIFIER; } else if (!plci->State || !plci->NL.Id || plci->nl_remove_id) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong state", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_STATE; } else { plci->command = 0; plci->ec_cmd = GET_WORD (ec_parms[0].info); plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS); result[0] = 2; PUT_WORD 
(&result[1], EC_SUCCESS); if (msg[1].length >= 4) { opt = GET_WORD (&ec_parms[0].info[2]); plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING | LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS); if (!(opt & EC_DISABLE_NON_LINEAR_PROCESSING)) plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING; if (opt & EC_DETECT_DISABLE_TONE) plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR; if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS)) plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS; if (msg[1].length >= 6) { plci->ec_tail_length = GET_WORD (&ec_parms[0].info[4]); } } switch (plci->ec_cmd) { case EC_ENABLE_OPERATION: plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); case EC_DISABLE_OPERATION: plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER | LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING | LEC_RESET_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); case EC_FREEZE_COEFFICIENTS: plci->ec_idi_options |= LEC_FREEZE_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); case EC_RESUME_COEFFICIENT_UPDATE: plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); case EC_RESET_COEFFICIENTS: plci->ec_idi_options |= LEC_RESET_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); default: dbug (1, dprintf ("[%06lx] %s,%d: EC unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, plci->ec_cmd)); PUT_WORD (&result[1], EC_UNSUPPORTED_OPERATION); } } } } else { if (api_parse (&msg[1].info[1], msg[1].length, "ws", ec_parms)) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong message format", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_MESSAGE_FORMAT; } else { if (GET_WORD (ec_parms[0].info) == EC_GET_SUPPORTED_SERVICES) { result[0] = 11; PUT_WORD (&result[1], EC_GET_SUPPORTED_SERVICES); result[3] = 8; PUT_WORD (&result[4], GOOD); PUT_WORD (&result[6], 0x0007); 
PUT_WORD (&result[8], LEC_MAX_SUPPORTED_TAIL_LENGTH); PUT_WORD (&result[10], 0); } else if (plci == NULL) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong PLCI", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_IDENTIFIER; } else if (!plci->State || !plci->NL.Id || plci->nl_remove_id) { dbug (1, dprintf ("[%06lx] %s,%d: Wrong state", UnMapId (Id), (char *)(FILE_), __LINE__)); Info = _WRONG_STATE; } else { plci->command = 0; plci->ec_cmd = GET_WORD (ec_parms[0].info); plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS); result[0] = 5; PUT_WORD (&result[1], plci->ec_cmd); result[3] = 2; PUT_WORD (&result[4], GOOD); plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING | LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS); plci->ec_tail_length = 0; if (ec_parms[1].length >= 2) { opt = GET_WORD (&ec_parms[1].info[1]); if (opt & EC_ENABLE_NON_LINEAR_PROCESSING) plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING; if (opt & EC_DETECT_DISABLE_TONE) plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR; if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS)) plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS; if (ec_parms[1].length >= 4) { plci->ec_tail_length = GET_WORD (&ec_parms[1].info[3]); } } switch (plci->ec_cmd) { case EC_ENABLE_OPERATION: plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); case EC_DISABLE_OPERATION: plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER | LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING | LEC_RESET_COEFFICIENTS; start_internal_command (Id, plci, ec_command); return (false); default: dbug (1, dprintf ("[%06lx] %s,%d: EC unknown request %04x", UnMapId (Id), (char *)(FILE_), __LINE__, plci->ec_cmd)); PUT_WORD (&result[4], _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP); } } } } } sendf (appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number, "wws", Info, (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ? 
    /* tail of ec_request(): confirm on the selector matching the
     * application's (private vs. standard) EC interface */
    PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
  return (false);
}


/* Forward an EC bypass/release indication from the DSP (msg[1] carries the
 * LEC disable reason) to the application, unless the canceller is manually
 * disabled.  Private-spec applications get the bare event word; standard
 * ones get an EC_BYPASS_INDICATION structure.
 * (Body continues on the next source line.) */
static void ec_indication (dword Id, PLCI *plci, byte *msg, word length)
{
  byte result[8];

  dbug (1, dprintf ("[%06lx] %s,%d: ec_indication",
    UnMapId (Id), (char *)(FILE_), __LINE__));
  if (!(plci->ec_idi_options & LEC_MANUAL_DISABLE))
  {
    if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
    {
      result[0] = 2;
      PUT_WORD (&result[1], 0);
      /* Map the DSP disable reason onto the private-spec event codes. */
      switch (msg[1])
      {
      case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
        PUT_WORD (&result[1], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
        break;
      case LEC_DISABLE_TYPE_REVERSED_2100HZ:
        PUT_WORD (&result[1], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
        break;
      case LEC_DISABLE_RELEASED:
        PUT_WORD (&result[1], EC_BYPASS_RELEASED);
        break;
      }
    }
    else
    {
      result[0] = 5;
      PUT_WORD (&result[1], EC_BYPASS_INDICATION);
      result[3] = 2;
      PUT_WORD (&result[4], 0);
      /* Same mapping, carried in the standard facility structure. */
      switch (msg[1])
      {
      case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
        PUT_WORD (&result[4], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
        break;
      case LEC_DISABLE_TYPE_REVERSED_2100HZ:
        PUT_WORD (&result[4], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
        break;
      case LEC_DISABLE_RELEASED:
        PUT_WORD (&result[4], EC_BYPASS_RELEASED);
        break;
      }
    }
    sendf (plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws",
      (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result); } } /*------------------------------------------------------------------*/ /* Advanced voice */ /*------------------------------------------------------------------*/ static void adv_voice_write_coefs (PLCI *plci, word write_command) { DIVA_CAPI_ADAPTER *a; word i; byte *p; word w, n, j, k; byte ch_map[MIXER_CHANNELS_BRI]; byte coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE + 2]; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_write_coefs %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, write_command)); a = plci->adapter; p = coef_buffer + 1; *(p++) = DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS; i = 0; while (i + sizeof(word) <= a->adv_voice_coef_length) { PUT_WORD (p, GET_WORD (a->adv_voice_coef_buffer + i)); p += 2; i += 2; } while (i < ADV_VOICE_OLD_COEF_COUNT * sizeof(word)) { PUT_WORD (p, 0x8000); p += 2; i += 2; } if (!a->li_pri && (plci->li_bchannel_id == 0)) { if ((li_config_table[a->li_base].plci == NULL) && (li_config_table[a->li_base + 1].plci != NULL)) { plci->li_bchannel_id = 1; li_config_table[a->li_base].plci = plci; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_set_bchannel_id %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, plci->li_bchannel_id)); } else if ((li_config_table[a->li_base].plci != NULL) && (li_config_table[a->li_base + 1].plci == NULL)) { plci->li_bchannel_id = 2; li_config_table[a->li_base + 1].plci = plci; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_set_bchannel_id %d", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__, plci->li_bchannel_id)); } } if (!a->li_pri && (plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { i = a->li_base + (plci->li_bchannel_id - 1); switch (write_command) { case ADV_VOICE_WRITE_ACTIVATION: j = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1); k = a->li_base + 
MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id); if (!(plci->B1_facilities & B1_FACILITY_MIXER)) { li_config_table[j].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX; li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR; } if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) { li_config_table[k].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX; li_config_table[i].flag_table[k] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR; li_config_table[k].flag_table[j] |= LI_FLAG_CONFERENCE; li_config_table[j].flag_table[k] |= LI_FLAG_CONFERENCE; } mixer_calculate_coefs (a); li_config_table[i].curchnl = li_config_table[i].channel; li_config_table[j].curchnl = li_config_table[j].channel; if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) li_config_table[k].curchnl = li_config_table[k].channel; break; case ADV_VOICE_WRITE_DEACTIVATION: for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; } k = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1); for (j = 0; j < li_total_channels; j++) { li_config_table[k].flag_table[j] = 0; li_config_table[j].flag_table[k] = 0; } if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) { k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id); for (j = 0; j < li_total_channels; j++) { li_config_table[k].flag_table[j] = 0; li_config_table[j].flag_table[k] = 0; } } mixer_calculate_coefs (a); break; } if (plci->B1_facilities & B1_FACILITY_MIXER) { w = 0; if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length) w = GET_WORD (a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE); if (li_config_table[i].channel & LI_CHANNEL_TX_DATA) w |= MIXER_FEATURE_ENABLE_TX_DATA; if (li_config_table[i].channel & LI_CHANNEL_RX_DATA) w |= MIXER_FEATURE_ENABLE_RX_DATA; *(p++) = (byte) w; *(p++) = (byte)(w >> 8); for (j = 0; j < sizeof(ch_map); j += 2) { ch_map[j] = (byte)(j + (plci->li_bchannel_id - 1)); ch_map[j+1] 
= (byte)(j + (2 - plci->li_bchannel_id)); } for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++) { i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch]; j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch]; if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED) { *(p++) = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01); w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4)); li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4; } else { *(p++) = (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n < a->adv_voice_coef_length) ? a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n] : 0x00; } } } else { for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++) *(p++) = a->adv_voice_coef_buffer[i]; } } else { for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++) *(p++) = a->adv_voice_coef_buffer[i]; } coef_buffer[0] = (p - coef_buffer) - 1; add_p (plci, FTY, coef_buffer); sig_req (plci, TEL_CTRL, 0); send_req (plci); } static void adv_voice_clear_config (PLCI *plci) { DIVA_CAPI_ADAPTER *a; word i, j; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_clear_config", (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)), (char *)(FILE_), __LINE__)); a = plci->adapter; if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)) { a->adv_voice_coef_length = 0; if (!a->li_pri && (plci->li_bchannel_id != 0) && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci)) { i = a->li_base + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET; i = a->li_base + 
MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) { i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id); li_config_table[i].curchnl = 0; li_config_table[i].channel = 0; li_config_table[i].chflags = 0; for (j = 0; j < li_total_channels; j++) { li_config_table[i].flag_table[j] = 0; li_config_table[j].flag_table[i] = 0; li_config_table[i].coef_table[j] = 0; li_config_table[j].coef_table[i] = 0; } } } } } static void adv_voice_prepare_switch (dword Id, PLCI *plci) { dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_prepare_switch", UnMapId (Id), (char *)(FILE_), __LINE__)); } static word adv_voice_save_config (dword Id, PLCI *plci, byte Rc) { dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_save_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); return (GOOD); } static word adv_voice_restore_config (dword Id, PLCI *plci, byte Rc) { DIVA_CAPI_ADAPTER *a; word Info; dbug (1, dprintf ("[%06lx] %s,%d: adv_voice_restore_config %02x %d", UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state)); Info = GOOD; a = plci->adapter; if ((plci->B1_facilities & B1_FACILITY_VOICE) && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)) { switch (plci->adjust_b_state) { case ADJUST_B_RESTORE_VOICE_1: plci->internal_command = plci->adjust_b_command; if (plci->sig_req) { plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1; break; } adv_voice_write_coefs (plci, ADV_VOICE_WRITE_UPDATE); plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_2; break; case ADJUST_B_RESTORE_VOICE_2: if ((Rc != OK) && (Rc != OK_FC)) { dbug (1, dprintf ("[%06lx] %s,%d: Restore voice config failed %02x", UnMapId (Id), 
(char *)(FILE_), __LINE__, Rc)); Info = _WRONG_STATE; break; } break; } } return (Info); } /*------------------------------------------------------------------*/ /* B1 resource switching */ /*------------------------------------------------------------------*/ static byte b1_facilities_table[] = { 0x00, /* 0 No bchannel resources */ 0x00, /* 1 Codec (automatic law) */ 0x00, /* 2 Codec (A-law) */ 0x00, /* 3 Codec (y-law) */ 0x00, /* 4 HDLC for X.21 */ 0x00, /* 5 HDLC */ 0x00, /* 6 External Device 0 */ 0x00, /* 7 External Device 1 */ 0x00, /* 8 HDLC 56k */ 0x00, /* 9 Transparent */ 0x00, /* 10 Loopback to network */ 0x00, /* 11 Test pattern to net */ 0x00, /* 12 Rate adaptation sync */ 0x00, /* 13 Rate adaptation async */ 0x00, /* 14 R-Interface */ 0x00, /* 15 HDLC 128k leased line */ 0x00, /* 16 FAX */ 0x00, /* 17 Modem async */ 0x00, /* 18 Modem sync HDLC */ 0x00, /* 19 V.110 async HDLC */ 0x12, /* 20 Adv voice (Trans,mixer) */ 0x00, /* 21 Codec connected to IC */ 0x0c, /* 22 Trans,DTMF */ 0x1e, /* 23 Trans,DTMF+mixer */ 0x1f, /* 24 Trans,DTMF+mixer+local */ 0x13, /* 25 Trans,mixer+local */ 0x12, /* 26 HDLC,mixer */ 0x12, /* 27 HDLC 56k,mixer */ 0x2c, /* 28 Trans,LEC+DTMF */ 0x3e, /* 29 Trans,LEC+DTMF+mixer */ 0x3f, /* 30 Trans,LEC+DTMF+mixer+local */ 0x2c, /* 31 RTP,LEC+DTMF */ 0x3e, /* 32 RTP,LEC+DTMF+mixer */ 0x3f, /* 33 RTP,LEC+DTMF+mixer+local */ 0x00, /* 34 Signaling task */ 0x00, /* 35 PIAFS */ 0x0c, /* 36 Trans,DTMF+TONE */ 0x1e, /* 37 Trans,DTMF+TONE+mixer */ 0x1f /* 38 Trans,DTMF+TONE+mixer+local*/ }; static word get_b1_facilities (PLCI * plci, byte b1_resource) { word b1_facilities; b1_facilities = b1_facilities_table[b1_resource]; if ((b1_resource == 9) || (b1_resource == 20) || (b1_resource == 25)) { if (!(((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE)) || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id-1] & (1L << PRIVATE_DTMF_TONE))))) { if (plci->adapter->manufacturer_features & 
MANUFACTURER_FEATURE_SOFTDTMF_SEND)
        b1_facilities |= B1_FACILITY_DTMFX;
      if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)
        b1_facilities |= B1_FACILITY_DTMFR;
    }
  }
  if ((b1_resource == 17) || (b1_resource == 18))
  {
    /* Modem resources provide DTMF both ways when V.18/VOWN is available. */
    if (plci->adapter->manufacturer_features & (MANUFACTURER_FEATURE_V18 | MANUFACTURER_FEATURE_VOWN))
      b1_facilities |= B1_FACILITY_DTMFX | B1_FACILITY_DTMFR;
  }
/*
  dbug (1, dprintf ("[%06lx] %s,%d: get_b1_facilities %d %04x",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char far *)(FILE_), __LINE__, b1_resource, b1_facilites));
*/
  return (b1_facilities);
}


/* Map a B1 resource code plus a requested facility set to the concrete
 * B1 resource code that implements those facilities (e.g. transparent 9
 * becomes 29 when echo cancel + mixer are wanted).  Codes not listed in
 * the switch are returned unchanged. */
static byte add_b1_facilities (PLCI   * plci, byte b1_resource, word b1_facilities)
{
  byte b;

  switch (b1_resource)
  {
  case 5:
  case 26:
    /* HDLC - add mixer variant if mixer/voice is requested. */
    if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
      b = 26;
    else
      b = 5;
    break;

  case 8:
  case 27:
    /* HDLC 56k - add mixer variant if mixer/voice is requested. */
    if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
      b = 27;
    else
      b = 8;
    break;

  case 9:
  case 20:
  case 22:
  case 23:
  case 24:
  case 25:
  case 28:
  case 29:
  case 30:
  case 36:
  case 37:
  case 38:
    /* Transparent family - pick the variant by EC / DTMF-tone / DTMF /
     * mixer / local facility combination. */
    if (b1_facilities & B1_FACILITY_EC)
    {
      if (b1_facilities & B1_FACILITY_LOCAL)
        b = 30;
      else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
        b = 29;
      else
        b = 28;
    }
    else if ((b1_facilities & (B1_FACILITY_DTMFX | B1_FACILITY_DTMFR | B1_FACILITY_MIXER))
      && (((plci->requested_options_conn | plci->requested_options)
        & (1L << PRIVATE_DTMF_TONE))
       || (plci->appl
        && (plci->adapter->requested_options_table[plci->appl->Id-1]
         & (1L << PRIVATE_DTMF_TONE)))))
    {
      /* DTMF-TONE private option requested: use the TONE variants. */
      if (b1_facilities & B1_FACILITY_LOCAL)
        b = 38;
      else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
        b = 37;
      else
        b = 36;
    }
    else if (((plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
      && !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
     || ((b1_facilities & B1_FACILITY_DTMFR)
      && ((b1_facilities & B1_FACILITY_MIXER)
       || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)))
     || ((b1_facilities & B1_FACILITY_DTMFX)
      && ((b1_facilities & B1_FACILITY_MIXER)
       || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND))))
    {
      /* Hardware DTMF needed (or soft DTMF unavailable with mixer). */
      if (b1_facilities & B1_FACILITY_LOCAL)
        b = 24;
      else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
        b = 23;
      else
        b = 22;
    }
    else
    {
      if (b1_facilities & B1_FACILITY_LOCAL)
        b = 25;
      else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
        b = 20;
      else
        b = 9;
    }
    break;

  case 31:
  case 32:
  case 33:
    /* RTP family. */
    if (b1_facilities & B1_FACILITY_LOCAL)
      b = 33;
    else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
      b = 32;
    else
      b = 31;
    break;

  default:
    b = b1_resource;
  }
  dbug (1, dprintf ("[%06lx] %s,%d: add_b1_facilities %d %04x %d %04x",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__,
    b1_resource, b1_facilities, b, get_b1_facilities (plci, b)));
  return (b);
}


/* Apply a new facility set: clear the per-facility configuration of every
 * facility that the new B1 resource no longer provides, then record the
 * resulting facility mask in the PLCI. */
static void adjust_b1_facilities (PLCI   *plci, byte new_b1_resource, word new_b1_facilities)
{
  word removed_facilities;

  dbug (1, dprintf ("[%06lx] %s,%d: adjust_b1_facilities %d %04x %04x",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__, new_b1_resource, new_b1_facilities,
    new_b1_facilities & get_b1_facilities (plci, new_b1_resource)));
  /* Keep only the facilities the new resource can actually provide. */
  new_b1_facilities &= get_b1_facilities (plci, new_b1_resource);
  removed_facilities = plci->B1_facilities & ~new_b1_facilities;
  if (removed_facilities & B1_FACILITY_EC)
    ec_clear_config (plci);
  if (removed_facilities & B1_FACILITY_DTMFR)
  {
    dtmf_rec_clear_config (plci);
    dtmf_parameter_clear_config (plci);
  }
  if (removed_facilities & B1_FACILITY_DTMFX)
    dtmf_send_clear_config (plci);
  if (removed_facilities & B1_FACILITY_MIXER)
    mixer_clear_config (plci);
  if (removed_facilities & B1_FACILITY_VOICE)
    adv_voice_clear_config (plci);
  plci->B1_facilities = new_b1_facilities;
}


/* Reset the adjust-B state machine bookkeeping for this PLCI. */
static void adjust_b_clear (PLCI   *plci)
{
  dbug (1, dprintf ("[%06lx] %s,%d: adjust_b_clear",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  plci->adjust_b_restore = false;
}


/* Central B-channel reconfiguration state machine.  Driven repeatedly
 * (via plci->internal_command callbacks) with the return code Rc of the
 * previous card request; plci->adjust_b_mode selects which phases run
 * (save, remove L2/L3, switch L1, assign L2/L3, connect, restore).
 * Cases intentionally fall through so that phases disabled by the mode
 * are skipped in one invocation.  Returns GOOD while in progress or on
 * completion, an error code (e.g. _WRONG_STATE) on failure. */
static word adjust_b_process (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  byte b1_resource;
  NCCI   * ncci_ptr;
  API_PARSE bp[2];

  dbug (1, dprintf ("[%06lx] %s,%d: adjust_b_process %02x %d",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
  Info = GOOD;
  switch (plci->adjust_b_state)
  {
  case ADJUST_B_START:
    if ((plci->adjust_b_parms_msg == NULL)
     && (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
     && ((plci->adjust_b_mode & ~(ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 |
      ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_RESTORE)) == 0))
    {
      /* Pure L1 facility switch: if the required resource code does not
       * change, only the facility configuration has to be adjusted. */
      b1_resource = (plci->adjust_b_mode == ADJUST_B_MODE_NO_RESOURCE) ?
        0 : add_b1_facilities (plci, plci->B1_resource, plci->adjust_b_facilities);
      if (b1_resource == plci->B1_resource)
      {
        adjust_b1_facilities (plci, b1_resource, plci->adjust_b_facilities);
        break;
      }
      if (plci->adjust_b_facilities & ~get_b1_facilities (plci, b1_resource))
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Adjust B nonsupported facilities %d %d %04x",
          UnMapId (Id), (char *)(FILE_), __LINE__,
          plci->B1_resource, b1_resource, plci->adjust_b_facilities));
        Info = _WRONG_STATE;
        break;
      }
    }
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      mixer_prepare_switch (Id, plci);
      dtmf_prepare_switch (Id, plci);
      dtmf_parameter_prepare_switch (Id, plci);
      ec_prepare_switch (Id, plci);
      adv_voice_prepare_switch (Id, plci);
    }
    plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SAVE_MIXER_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      Info = mixer_save_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SAVE_DTMF_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      Info = dtmf_save_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
    /* fall through */
  case ADJUST_B_REMOVE_L23_1:
    if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
     && plci->NL.Id && !plci->nl_remove_id)
    {
      plci->internal_command = plci->adjust_b_command;
      if (plci->adjust_b_ncci != 0)
      {
        /* Flush all pending data and data acknowledgements of the NCCI
         * before removing the network layer. */
        ncci_ptr = &(plci->adapter->ncci[plci->adjust_b_ncci]);
        while (ncci_ptr->data_pending)
        {
          plci->data_sent_ptr = ncci_ptr->DBuffer[ncci_ptr->data_out].P;
          data_rc (plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
        }
        while (ncci_ptr->data_ack_pending)
          data_ack (plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
      }
      nl_req_ncci (plci, REMOVE,
        (byte)((plci->adjust_b_mode & ADJUST_B_MODE_CONNECT) ? plci->adjust_b_ncci : 0));
      send_req (plci);
      plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
      break;
    }
    plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
    Rc = OK;
    /* fall through */
  case ADJUST_B_REMOVE_L23_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B remove failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
    {
      if (plci_nl_busy (plci))
      {
        plci->internal_command = plci->adjust_b_command;
        break;
      }
    }
    plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SAVE_EC_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      Info = ec_save_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SAVE_DTMF_PARAMETER_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      Info = dtmf_parameter_save_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SAVE_VOICE_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
    {
      Info = adv_voice_save_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
    /* fall through */
  case ADJUST_B_SWITCH_L1_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
    {
      if (plci->sig_req)
      {
        plci->internal_command = plci->adjust_b_command;
        break;
      }
      if (plci->adjust_b_parms_msg != NULL)
        api_load_msg (plci->adjust_b_parms_msg, bp);
      else
        api_load_msg (&plci->B_protocol, bp);
      Info = add_b1 (plci, bp,
        (word)((plci->adjust_b_mode & ADJUST_B_MODE_NO_RESOURCE) ? 2 : 0),
        plci->adjust_b_facilities);
      if (Info != GOOD)
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Adjust B invalid L1 parameters %d %04x",
          UnMapId (Id), (char *)(FILE_), __LINE__,
          plci->B1_resource, plci->adjust_b_facilities));
        break;
      }
      plci->internal_command = plci->adjust_b_command;
      sig_req (plci, RESOURCES, 0);
      send_req (plci);
      plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
      break;
    }
    plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
    Rc = OK;
    /* fall through */
  case ADJUST_B_SWITCH_L1_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B switch failed %02x %d %04x",
        UnMapId (Id), (char *)(FILE_), __LINE__,
        Rc, plci->B1_resource, plci->adjust_b_facilities));
      Info = _WRONG_STATE;
      break;
    }
    plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_VOICE_1:
  case ADJUST_B_RESTORE_VOICE_2:
    if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
    {
      Info = adv_voice_restore_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
  case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
    if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
    {
      Info = dtmf_parameter_restore_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_EC_1:
  case ADJUST_B_RESTORE_EC_2:
    if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
    {
      Info = ec_restore_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
    /* fall through */
  case ADJUST_B_ASSIGN_L23_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
    {
      if (plci_nl_busy (plci))
      {
        plci->internal_command = plci->adjust_b_command;
        break;
      }
      if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
        plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
      if (plci->adjust_b_parms_msg != NULL)
        api_load_msg (plci->adjust_b_parms_msg, bp);
      else
        api_load_msg (&plci->B_protocol, bp);
      Info = add_b23 (plci, bp);
      if (Info != GOOD)
      {
        dbug (1, dprintf ("[%06lx] %s,%d: Adjust B invalid L23 parameters %04x",
          UnMapId (Id), (char *)(FILE_), __LINE__, Info));
        break;
      }
      plci->internal_command = plci->adjust_b_command;
      nl_req_ncci (plci, ASSIGN, 0);
      send_req (plci);
      plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
      break;
    }
    plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
    Rc = ASSIGN_OK;
    /* fall through */
  case ADJUST_B_ASSIGN_L23_2:
    if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B assign failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
    {
      if (Rc != ASSIGN_OK)
      {
        /* Wait for the final ASSIGN confirmation. */
        plci->internal_command = plci->adjust_b_command;
        break;
      }
    }
    if (plci->adjust_b_mode & ADJUST_B_MODE_USER_CONNECT)
    {
      /* The connect will be issued later by the application. */
      plci->adjust_b_restore = true;
      break;
    }
    plci->adjust_b_state = ADJUST_B_CONNECT_1;
    /* fall through */
  case ADJUST_B_CONNECT_1:
    if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
    {
      plci->internal_command = plci->adjust_b_command;
      if (plci_nl_busy (plci))
        break;
      nl_req_ncci (plci, N_CONNECT, 0);
      send_req (plci);
      plci->adjust_b_state = ADJUST_B_CONNECT_2;
      break;
    }
    plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_CONNECT_2:
  case ADJUST_B_CONNECT_3:
  case ADJUST_B_CONNECT_4:
    /* Wait for both the request confirmation (Rc == OK) and the connect
     * indication (Rc == 0), in either order, before continuing. */
    if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B connect failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (Rc == OK)
    {
      if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
      {
        get_ncci (plci, (byte)(Id >> 16), plci->adjust_b_ncci);
        Id = (Id & 0xffff) | (((dword)(plci->adjust_b_ncci)) << 16);
      }
      if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
        plci->adjust_b_state = ADJUST_B_CONNECT_3;
      else if (plci->adjust_b_state == ADJUST_B_CONNECT_4)
        plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
    }
    else if (Rc == 0)
    {
      if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
        plci->adjust_b_state = ADJUST_B_CONNECT_4;
      else if (plci->adjust_b_state == ADJUST_B_CONNECT_3)
        plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
    }
    if (plci->adjust_b_state != ADJUST_B_RESTORE_DTMF_1)
    {
      plci->internal_command = plci->adjust_b_command;
      break;
    }
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_DTMF_1:
  case ADJUST_B_RESTORE_DTMF_2:
    if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
    {
      Info = dtmf_restore_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_MIXER_1:
  case ADJUST_B_RESTORE_MIXER_2:
  case ADJUST_B_RESTORE_MIXER_3:
  case ADJUST_B_RESTORE_MIXER_4:
  case ADJUST_B_RESTORE_MIXER_5:
  case ADJUST_B_RESTORE_MIXER_6:
  case ADJUST_B_RESTORE_MIXER_7:
    if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
    {
      Info = mixer_restore_config (Id, plci, Rc);
      if ((Info != GOOD) || plci->internal_command)
        break;
    }
    plci->adjust_b_state = ADJUST_B_END;
    /* fall through */
  case ADJUST_B_END:
    break;
  }
  return (Info);
}


/* Start an adjust-B run that switches only the B1 resource/facilities.
 * bp_msg == NULL with no current resource means "no resource" mode. */
static void adjust_b1_resource (dword Id, PLCI   *plci, API_SAVE   *bp_msg, word b1_facilities, word internal_command)
{
  dbug (1, dprintf ("[%06lx] %s,%d: adjust_b1_resource %d %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__,
    plci->B1_resource, b1_facilities));
  plci->adjust_b_parms_msg = bp_msg;
  plci->adjust_b_facilities = b1_facilities;
  plci->adjust_b_command = internal_command;
  plci->adjust_b_ncci = (word)(Id >> 16);
  if ((bp_msg == NULL) && (plci->B1_resource == 0))
    plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_SWITCH_L1;
  else
    plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_RESTORE;
  plci->adjust_b_state = ADJUST_B_START;
  dbug (1, dprintf ("[%06lx] %s,%d: Adjust B1 resource %d %04x...",
    UnMapId (Id), (char *)(FILE_), __LINE__,
    plci->B1_resource, b1_facilities));
}


/* Internal-command handler that restores the saved facility configuration
 * (restore-only adjust-B run), used after a deferred reconfiguration. */
static void adjust_b_restore (dword Id, PLCI   *plci, byte Rc)
{
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: adjust_b_restore %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc,
    plci->internal_command));
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    if (plci->req_in != 0)
    {
      /* Request queue not empty yet - run again when it drains. */
      plci->internal_command = ADJUST_B_RESTORE_1;
      break;
    }
    Rc = OK;
    /* fall through */
  case ADJUST_B_RESTORE_1:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B enqueued failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
    }
    plci->adjust_b_parms_msg = NULL;
    plci->adjust_b_facilities = plci->B1_facilities;
    plci->adjust_b_command = ADJUST_B_RESTORE_2;
    plci->adjust_b_ncci = (word)(Id >> 16);
    plci->adjust_b_mode = ADJUST_B_MODE_RESTORE;
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: Adjust B restore...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case ADJUST_B_RESTORE_2:
    if (adjust_b_process (Id, plci, Rc) != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Adjust B restore failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
    }
    if (plci->internal_command)
      break;
    break;
  }
}


/* Internal-command handler for RESET_B3: removes and re-assigns the
 * network layer (remove L23 / assign L23 / connect) through the
 * adjust-B state machine, then sends a RESET_B3 indication. */
static void reset_b3_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: reset_b3_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    plci->adjust_b_parms_msg = NULL;
    plci->adjust_b_facilities = plci->B1_facilities;
    plci->adjust_b_command = RESET_B3_COMMAND_1;
    plci->adjust_b_ncci = (word)(Id >> 16);
    plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_CONNECT;
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: Reset B3...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case RESET_B3_COMMAND_1:
    Info = adjust_b_process (Id, plci, Rc);
    if (Info != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Reset failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
      break;
    }
    if (plci->internal_command)
      return;
    break;
  }
/*  sendf (plci->appl, _RESET_B3_R | CONFIRM, Id, plci->number, "w", Info);*/
  sendf(plci->appl,_RESET_B3_I,Id,0,"s","");
}


/* Internal-command handler for SELECT_B_REQ: switches the B-protocol to
 * the parameters in plci->saved_msg via the adjust-B state machine (an
 * empty parameter list selects "no resource"), updates the codec voice
 * channel for advanced voice, then confirms the request. */
static void select_b_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;
  byte esc_chi[3];

  dbug (1, dprintf ("[%06lx] %s,%d: select_b_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    plci->adjust_b_parms_msg = &plci->saved_msg;
    if ((plci->tel == ADV_VOICE) && (plci == plci->adapter->AdvSignalPLCI))
      plci->adjust_b_facilities = plci->B1_facilities | B1_FACILITY_VOICE;
    else
      plci->adjust_b_facilities = plci->B1_facilities & ~B1_FACILITY_VOICE;
    plci->adjust_b_command = SELECT_B_COMMAND_1;
    plci->adjust_b_ncci = (word)(Id >> 16);
    if (plci->saved_msg.parms[0].length == 0)
    {
      plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 |
        ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_NO_RESOURCE;
    }
    else
    {
      plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 |
        ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_ASSIGN_L23 |
        ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
    }
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: Select B protocol...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case SELECT_B_COMMAND_1:
    Info = adjust_b_process (Id, plci, Rc);
    if (Info != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: Select B protocol failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
      break;
    }
    if (plci->internal_command)
      return;
    if (plci->tel == ADV_VOICE)
    {
      /* Escape CHI information element: length 2, coding 0x18, channel. */
      esc_chi[0] = 0x02;
      esc_chi[1] = 0x18;
      esc_chi[2] = plci->b_channel;
      SetVoiceChannel (plci->adapter->AdvCodecPLCI, esc_chi, plci->adapter);
    }
    break;
  }
  sendf (plci->appl, _SELECT_B_REQ | CONFIRM, Id, plci->number, "w", Info);
}


/* Internal-command handler that sends the FAX N_CONNECT_ACK carrying the
 * negotiated fax connect info, then (once acknowledged) delivers a
 * pending CONNECT_B3_ACTIVE indication. */
static void fax_connect_ack_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    /* fall through */
  case FAX_CONNECT_ACK_COMMAND_1:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = FAX_CONNECT_ACK_COMMAND_1;
      return;
    }
    plci->internal_command = FAX_CONNECT_ACK_COMMAND_2;
    plci->NData[0].P = plci->fax_connect_info_buffer;
    plci->NData[0].PLength = plci->fax_connect_info_length;
    plci->NL.X = plci->NData;
    plci->NL.ReqCh = 0;
    plci->NL.Req = plci->nl_req = (byte) N_CONNECT_ACK;
    plci->adapter->request (&plci->NL);
    return;
  case FAX_CONNECT_ACK_COMMAND_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: FAX issue CONNECT ACK failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      break;
    }
  }
  if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
   && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
  {
    /* B3 protocol 4 (T.30/fax) sends an empty NCPI here. */
    if (plci->B3_prot == 4)
      sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"s","");
    else
      sendf(plci->appl,_CONNECT_B3_ACTIVE_I,Id,0,"S",plci->ncpi_buffer);
    plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
  }
}


/* Internal-command handler that acknowledges a received FAX EDATA
 * message by sending an N_EDATA response frame. */
static void fax_edata_ack_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    /* fall through */
  case FAX_EDATA_ACK_COMMAND_1:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = FAX_EDATA_ACK_COMMAND_1;
      return;
    }
    plci->internal_command = FAX_EDATA_ACK_COMMAND_2;
    plci->NData[0].P = plci->fax_connect_info_buffer;
    plci->NData[0].PLength = plci->fax_edata_ack_length;
    plci->NL.X = plci->NData;
    plci->NL.ReqCh = 0;
    plci->NL.Req = plci->nl_req = (byte) N_EDATA;
    plci->adapter->request (&plci->NL);
    return;
  case FAX_EDATA_ACK_COMMAND_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1,
dprintf ("[%06lx] %s,%d: FAX issue EDATA ACK failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      break;
    }
  }
}


/* Internal-command handler that transmits the FAX connect info via
 * N_EDATA and then issues the N_CONNECT for the B3 connection; confirms
 * the CONNECT_B3 request on failure. */
static void fax_connect_info_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: fax_connect_info_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    /* fall through */
  case FAX_CONNECT_INFO_COMMAND_1:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = FAX_CONNECT_INFO_COMMAND_1;
      return;
    }
    plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
    plci->NData[0].P = plci->fax_connect_info_buffer;
    plci->NData[0].PLength = plci->fax_connect_info_length;
    plci->NL.X = plci->NData;
    plci->NL.ReqCh = 0;
    plci->NL.Req = plci->nl_req = (byte) N_EDATA;
    plci->adapter->request (&plci->NL);
    return;
  case FAX_CONNECT_INFO_COMMAND_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: FAX setting connect info failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (plci_nl_busy (plci))
    {
      plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
      return;
    }
    plci->command = _CONNECT_B3_R;
    nl_req_ncci (plci, N_CONNECT, 0);
    send_req (plci);
    return;
  }
  sendf (plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}


/* Internal-command handler that re-assigns the L2/L3 protocol stack for
 * FAX (adjust-B remove/assign) and then issues the N_CONNECT; confirms
 * the CONNECT_B3 request on failure. */
static void fax_adjust_b23_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: fax_adjust_b23_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    plci->adjust_b_parms_msg = NULL;
    plci->adjust_b_facilities = plci->B1_facilities;
    plci->adjust_b_command = FAX_ADJUST_B23_COMMAND_1;
    plci->adjust_b_ncci = (word)(Id >> 16);
    plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23;
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: FAX adjust B23...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case FAX_ADJUST_B23_COMMAND_1:
    Info = adjust_b_process (Id, plci, Rc);
    if (Info != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: FAX adjust failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
      break;
    }
    if (plci->internal_command)
      return;
    /* fall through */
  case FAX_ADJUST_B23_COMMAND_2:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = FAX_ADJUST_B23_COMMAND_2;
      return;
    }
    plci->command = _CONNECT_B3_R;
    nl_req_ncci (plci, N_CONNECT, 0);
    send_req (plci);
    return;
  }
  sendf (plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}


/* Internal-command handler tracking the FAX disconnect handshake; waits
 * for both the EDATA request confirmation (Rc == OK) and the following
 * indication (Rc == 0) before finishing. */
static void fax_disconnect_command (dword Id, PLCI   *plci, byte Rc)
{
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: fax_disconnect_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    plci->internal_command = FAX_DISCONNECT_COMMAND_1;
    return;
  case FAX_DISCONNECT_COMMAND_1:
  case FAX_DISCONNECT_COMMAND_2:
  case FAX_DISCONNECT_COMMAND_3:
    if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: FAX disconnect EDATA failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      break;
    }
    if (Rc == OK)
    {
      if ((internal_command == FAX_DISCONNECT_COMMAND_1)
       || (internal_command == FAX_DISCONNECT_COMMAND_2))
      {
        plci->internal_command = FAX_DISCONNECT_COMMAND_2;
      }
    }
    else if (Rc == 0)
    {
      if (internal_command == FAX_DISCONNECT_COMMAND_1)
        plci->internal_command = FAX_DISCONNECT_COMMAND_3;
    }
    return;
  }
}


/* Internal-command handler for an RTP CONNECT_B3 request: issues the
 * N_CONNECT, then transmits the RTP parameters as N_UDATA; confirms the
 * CONNECT_B3 request on failure. */
static void rtp_connect_b3_req_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: rtp_connect_b3_req_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command =
plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    /* fall through */
  case RTP_CONNECT_B3_REQ_COMMAND_1:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_1;
      return;
    }
    plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
    nl_req_ncci (plci, N_CONNECT, 0);
    send_req (plci);
    return;
  case RTP_CONNECT_B3_REQ_COMMAND_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: RTP setting connect info failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (plci_nl_busy (plci))
    {
      plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
      return;
    }
    /* Send the RTP parameter block (length-prefixed) as N_UDATA. */
    plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_3;
    plci->NData[0].PLength = plci->internal_req_buffer[0];
    plci->NData[0].P = plci->internal_req_buffer + 1;
    plci->NL.X = plci->NData;
    plci->NL.ReqCh = 0;
    plci->NL.Req = plci->nl_req = (byte) N_UDATA;
    plci->adapter->request (&plci->NL);
    break;
  case RTP_CONNECT_B3_REQ_COMMAND_3:
    return;
  }
  sendf (plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
}


/* Internal-command handler for an RTP CONNECT_B3 response: sends the
 * N_CONNECT_ACK, delivers CONNECT_B3_ACTIVE, then transmits the RTP
 * parameters as N_UDATA. */
static void rtp_connect_b3_res_command (dword Id, PLCI   *plci, byte Rc)
{
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    /* fall through */
  case RTP_CONNECT_B3_RES_COMMAND_1:
    if (plci_nl_busy (plci))
    {
      plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_1;
      return;
    }
    plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
    nl_req_ncci (plci, N_CONNECT_ACK, (byte)(Id >> 16));
    send_req (plci);
    return;
  case RTP_CONNECT_B3_RES_COMMAND_2:
    if ((Rc != OK) && (Rc != OK_FC))
    {
      dbug (1, dprintf ("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
        UnMapId (Id), (char *)(FILE_), __LINE__, Rc));
      Info = _WRONG_STATE;
      break;
    }
    if (plci_nl_busy (plci))
    {
      plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
      return;
    }
    sendf (plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
    /* Send the RTP parameter block (length-prefixed) as N_UDATA. */
    plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_3;
    plci->NData[0].PLength = plci->internal_req_buffer[0];
    plci->NData[0].P = plci->internal_req_buffer + 1;
    plci->NL.X = plci->NData;
    plci->NL.ReqCh = 0;
    plci->NL.Req = plci->nl_req = (byte) N_UDATA;
    plci->adapter->request (&plci->NL);
    return;
  case RTP_CONNECT_B3_RES_COMMAND_3:
    return;
  }
}


/* Internal-command handler for call HOLD: saves the facility
 * configuration and removes the network layer via adjust-B, then sends
 * the Hold indication to the application. */
static void hold_save_command (dword Id, PLCI   *plci, byte Rc)
{
  byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: hold_save_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    if (!plci->NL.Id)
      break;
    plci->command = 0;
    plci->adjust_b_parms_msg = NULL;
    plci->adjust_b_facilities = plci->B1_facilities;
    plci->adjust_b_command = HOLD_SAVE_COMMAND_1;
    plci->adjust_b_ncci = (word)(Id >> 16);
    plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23;
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: HOLD save...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case HOLD_SAVE_COMMAND_1:
    Info = adjust_b_process (Id, plci, Rc);
    if (Info != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: HOLD save failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
      break;
    }
    if (plci->internal_command)
      return;
  }
  sendf (plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}


/* Internal-command handler for call RETRIEVE: re-assigns the network
 * layer and restores the saved facility configuration via adjust-B,
 * then sends the Retrieve indication to the application. */
static void retrieve_restore_command (dword Id, PLCI   *plci, byte Rc)
{
  byte SS_Ind[] = "\x05\x03\x00\x02\x00\x00"; /* Retrieve_Ind struct*/
  word Info;
  word internal_command;

  dbug (1, dprintf ("[%06lx] %s,%d: retrieve_restore_command %02x %04x",
    UnMapId (Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
  Info = GOOD;
  internal_command = plci->internal_command;
  plci->internal_command = 0;
  switch (internal_command)
  {
  default:
    plci->command = 0;
    plci->adjust_b_parms_msg = NULL;
    plci->adjust_b_facilities = plci->B1_facilities;
    plci->adjust_b_command = RETRIEVE_RESTORE_COMMAND_1;
    plci->adjust_b_ncci = (word)(Id >> 16);
    plci->adjust_b_mode = ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
    plci->adjust_b_state = ADJUST_B_START;
    dbug (1, dprintf ("[%06lx] %s,%d: RETRIEVE restore...",
      UnMapId (Id), (char *)(FILE_), __LINE__));
    /* fall through */
  case RETRIEVE_RESTORE_COMMAND_1:
    Info = adjust_b_process (Id, plci, Rc);
    if (Info != GOOD)
    {
      dbug (1, dprintf ("[%06lx] %s,%d: RETRIEVE restore failed",
        UnMapId (Id), (char *)(FILE_), __LINE__));
      break;
    }
    if (plci->internal_command)
      return;
  }
  sendf (plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
}


/* Initialize the B1 resource bookkeeping of a freshly allocated PLCI:
 * no resource, no facilities, and all per-facility configs cleared. */
static void init_b1_config (PLCI   *plci)
{
  dbug (1, dprintf ("[%06lx] %s,%d: init_b1_config",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  plci->B1_resource = 0;
  plci->B1_facilities = 0;
  plci->li_bchannel_id = 0;
  mixer_clear_config (plci);
  ec_clear_config (plci);
  dtmf_rec_clear_config (plci);
  dtmf_send_clear_config (plci);
  dtmf_parameter_clear_config (plci);
  adv_voice_clear_config (plci);
  adjust_b_clear (plci);
}


/* Tear down the B1 resource bookkeeping of a PLCI, releasing its mixer
 * table slot if this PLCI still owns it. */
static void clear_b1_config (PLCI   *plci)
{
  dbug (1, dprintf ("[%06lx] %s,%d: clear_b1_config",
    (dword)((plci->Id << 8) | UnMapController (plci->adapter->Id)),
    (char *)(FILE_), __LINE__));
  adv_voice_clear_config (plci);
  adjust_b_clear (plci);
  ec_clear_config (plci);
  dtmf_rec_clear_config (plci);
  dtmf_send_clear_config (plci);
  dtmf_parameter_clear_config (plci);
  if ((plci->li_bchannel_id != 0)
   && (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci == plci))
  {
    mixer_clear_config (plci);
    li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = NULL;
    plci->li_bchannel_id = 0;
  }
  plci->B1_resource = 0;
  plci->B1_facilities = 0;
}


/* -----------------------------------------------------------------
                XON protocol local helpers
   ----------------------------------------------------------------- */

/* Drop all per-channel flow-control state owned by this PLCI. */
static void channel_flow_control_remove (PLCI   * plci) {
  DIVA_CAPI_ADAPTER   * a = plci->adapter;
  word i;
  for(i=1;i<MAX_NL_CHANNEL+1;i++) {
    if (a->ch_flow_plci[i] == plci->Id) {
      a->ch_flow_plci[i] = 0;
      a->ch_flow_control[i] = 0;
    }
  }
}

/* Note that the X-ON previously sent on this channel has completed. */
static void channel_x_on (PLCI   * plci, byte ch) {
  DIVA_CAPI_ADAPTER   * a = plci->adapter;
  if (a->ch_flow_control[ch] & N_XON_SENT) {
    a->ch_flow_control[ch] &= ~N_XON_SENT;
  }
}

/* Mark a channel flow-controlled (X-OFF) and remember the owning PLCI;
 * no-op if receive flow control is already active on the channel. */
static void channel_x_off (PLCI   * plci, byte ch, byte flag) {
  DIVA_CAPI_ADAPTER   * a = plci->adapter;
  if ((a->ch_flow_control[ch] & N_RX_FLOW_CONTROL_MASK) == 0) {
    a->ch_flow_control[ch] |= (N_CH_XOFF | flag);
    a->ch_flow_plci[ch] = plci->Id;
    a->ch_flow_control_pending++;
  }
}

/* Request that an X-ON be transmitted for a channel currently X-OFFed. */
static void channel_request_xon (PLCI   * plci, byte ch) {
  DIVA_CAPI_ADAPTER   * a = plci->adapter;

  if (a->ch_flow_control[ch] & N_CH_XOFF) {
    a->ch_flow_control[ch] |= N_XON_REQ;
    a->ch_flow_control[ch] &= ~N_CH_XOFF;
    a->ch_flow_control[ch] &= ~N_XON_CONNECT_IND;
  }
}

/* Request X-ON for every X-OFFed channel of this PLCI that is waiting
 * for a connect indication, then try to transmit one. */
static void channel_xmit_extended_xon (PLCI   * plci) {
  DIVA_CAPI_ADAPTER   * a;
  int max_ch = ARRAY_SIZE(a->ch_flow_control);
  int i, one_requested = 0;

  if ((!plci) || (!plci->Id) || ((a = plci->adapter) == NULL)) {
    return;
  }

  for (i = 0; i < max_ch; i++) {
    if ((a->ch_flow_control[i] & N_CH_XOFF) &&
        (a->ch_flow_control[i] & N_XON_CONNECT_IND) &&
        (plci->Id == a->ch_flow_plci[i])) {
      channel_request_xon (plci, (byte)i);
      one_requested = 1;
    }
  }

  if (one_requested) {
    channel_xmit_xon (plci);
  }
}

/*
  Try to xmit next X_ON
*/
/* Round-robin search (starting after the last serviced channel) for a
 * channel of this PLCI with a pending X-ON request; returns the channel
 * number or 0 if none / feature unsupported. */
static int find_channel_with_pending_x_on (DIVA_CAPI_ADAPTER   * a, PLCI   * plci) {
  int max_ch = ARRAY_SIZE(a->ch_flow_control);
  int i;

  if (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)) {
    return (0);
  }

  if (a->last_flow_control_ch >= max_ch) {
    a->last_flow_control_ch = 1;
  }
  for (i=a->last_flow_control_ch; i < max_ch; i++) {
    if ((a->ch_flow_control[i] & N_XON_REQ) &&
        (plci->Id == a->ch_flow_plci[i])) {
      a->last_flow_control_ch = i+1;
      return (i);
    }
  }

  for (i = 1; i < a->last_flow_control_ch; i++) {
    if ((a->ch_flow_control[i] & N_XON_REQ) &&
        (plci->Id == a->ch_flow_plci[i])) {
      a->last_flow_control_ch = i+1;
      return (i);
    }
  }

  return (0);
}

/* Transmit one pending X-ON (zero-length N_XON frame) if the network
 * layer is idle and a channel is waiting for it. */
static void channel_xmit_xon (PLCI   * plci) {
  DIVA_CAPI_ADAPTER   * a = plci->adapter;
  byte ch;

  if (plci->nl_req || !plci->NL.Id || plci->nl_remove_id) {
    return;
  }
  if ((ch = (byte)find_channel_with_pending_x_on (a, plci)) == 0) {
    return;
  }
  a->ch_flow_control[ch] &= ~N_XON_REQ;
  a->ch_flow_control[ch] |= N_XON_SENT;

  plci->NL.Req = plci->nl_req = (byte)N_XON;
  plci->NL.ReqCh         = ch;
  plci->NL.X             = plci->NData;
  plci->NL.XNum          = 1;
  plci->NData[0].P       = &plci->RBuffer[0];
  plci->NData[0].PLength = 0;

  plci->adapter->request(&plci->NL);
}

/* Decide whether the NCCI on this channel may be X-ONed: the owning
 * application must still have free data buffers and fewer than three
 * buffers already queued for the NCCI. */
static int channel_can_xon (PLCI   * plci, byte ch) {
  APPL   * APPLptr;
  DIVA_CAPI_ADAPTER   * a;
  word NCCIcode;
  dword count;
  word Num;
  word i;

  APPLptr = plci->appl;
  a = plci->adapter;

  if (!APPLptr)
    return (0);

  NCCIcode = a->ch_ncci[ch] | (((word) a->Id) << 8);

  /* count all buffers within the Application pool    */
  /* belonging to the same NCCI. XON if a first is    */
  /* used.                                            */
  count = 0;
  Num = 0xffff;
  for(i=0; i<APPLptr->MaxBuffer; i++) {
    if(NCCIcode==APPLptr->DataNCCI[i]) count++;
    if(!APPLptr->DataNCCI[i] && Num==0xffff) Num = i;
  }
  if ((count > 2) || (Num == 0xffff)) {
    return (0);
  }
  return (1);
}


/*------------------------------------------------------------------*/

/* Called-party-number filter hook; this build accepts every number. */
static word CPN_filter_ok(byte   *cpn,DIVA_CAPI_ADAPTER   * a,word offset)
{
  return 1;
}


/**********************************************************************************/
/* function groups the listening applications according to the CIP mask and the   */
/* Info_Mask. Each group gets just one Connect_Ind. Some application manufacturer */
/* are not multi-instance capable, so they start e.g. 30 applications what causes */
/* big problems on application level (one call, 30 Connect_Ind, ect).
The */ /* function must be enabled by setting "a->group_optimization_enabled" from the */ /* OS specific part (per adapter). */ /**********************************************************************************/ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci) { word i,j,k,busy,group_found; dword info_mask_group[MAX_CIP_TYPES]; dword cip_mask_group[MAX_CIP_TYPES]; word appl_number_group_type[MAX_APPL]; PLCI *auxplci; set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */ if(!a->group_optimization_enabled) { dbug(1,dprintf("No group optimization")); return; } dbug(1,dprintf("Group optimization = 0x%x...", a->group_optimization_enabled)); for(i=0;i<MAX_CIP_TYPES;i++) { info_mask_group[i] = 0; cip_mask_group [i] = 0; } for(i=0;i<MAX_APPL;i++) { appl_number_group_type[i] = 0; } for(i=0; i<max_appl; i++) /* check if any multi instance capable application is present */ { /* group_optimization set to 1 means not to optimize multi-instance capable applications (default) */ if(application[i].Id && (application[i].MaxNCCI) > 1 && (a->CIP_Mask[i]) && (a->group_optimization_enabled ==1) ) { dbug(1,dprintf("Multi-Instance capable, no optimization required")); return; /* allow good application unfiltered access */ } } for(i=0; i<max_appl; i++) /* Build CIP Groups */ { if(application[i].Id && a->CIP_Mask[i] ) { for(k=0,busy=false; k<a->max_plci; k++) { if(a->plci[k].Id) { auxplci = &a->plci[k]; if(auxplci->appl == &application[i]) /* application has a busy PLCI */ { busy = true; dbug(1,dprintf("Appl 0x%x is busy",i+1)); } else if(test_c_ind_mask_bit (auxplci, i)) /* application has an incoming call pending */ { busy = true; dbug(1,dprintf("Appl 0x%x has inc. 
call pending",i+1)); } } } for(j=0,group_found=0; j<=(MAX_CIP_TYPES) && !busy &&!group_found; j++) /* build groups with free applications only */ { if(j==MAX_CIP_TYPES) /* all groups are in use but group still not found */ { /* the MAX_CIP_TYPES group enables all calls because of field overflow */ appl_number_group_type[i] = MAX_CIP_TYPES; group_found=true; dbug(1,dprintf("Field overflow appl 0x%x",i+1)); } else if( (info_mask_group[j]==a->CIP_Mask[i]) && (cip_mask_group[j]==a->Info_Mask[i]) ) { /* is group already present ? */ appl_number_group_type[i] = j|0x80; /* store the group number for each application */ group_found=true; dbug(1,dprintf("Group 0x%x found with appl 0x%x, CIP=0x%lx",appl_number_group_type[i],i+1,info_mask_group[j])); } else if(!info_mask_group[j]) { /* establish a new group */ appl_number_group_type[i] = j|0x80; /* store the group number for each application */ info_mask_group[j] = a->CIP_Mask[i]; /* store the new CIP mask for the new group */ cip_mask_group[j] = a->Info_Mask[i]; /* store the new Info_Mask for this new group */ group_found=true; dbug(1,dprintf("New Group 0x%x established with appl 0x%x, CIP=0x%lx",appl_number_group_type[i],i+1,info_mask_group[j])); } } } } for(i=0; i<max_appl; i++) /* Build group_optimization_mask_table */ { if(appl_number_group_type[i]) /* application is free, has listens and is member of a group */ { if(appl_number_group_type[i] == MAX_CIP_TYPES) { dbug(1,dprintf("OverflowGroup 0x%x, valid appl = 0x%x, call enabled",appl_number_group_type[i],i+1)); } else { dbug(1,dprintf("Group 0x%x, valid appl = 0x%x",appl_number_group_type[i],i+1)); for(j=i+1; j<max_appl; j++) /* search other group members and mark them as busy */ { if(appl_number_group_type[i] == appl_number_group_type[j]) { dbug(1,dprintf("Appl 0x%x is member of group 0x%x, no call",j+1,appl_number_group_type[j])); clear_group_ind_mask_bit (plci, j); /* disable call on other group members */ appl_number_group_type[j] = 0; /* remove disabled group 
member from group list */ } } } } else /* application should not get a call */ { clear_group_ind_mask_bit (plci, i); } } } /* OS notifies the driver about a application Capi_Register */ word CapiRegister(word id) { word i,j,appls_found; PLCI *plci; DIVA_CAPI_ADAPTER *a; for(i=0,appls_found=0; i<max_appl; i++) { if( application[i].Id && (application[i].Id!=id) ) { appls_found++; /* an application has been found */ } } if(appls_found) return true; for(i=0; i<max_adapter; i++) /* scan all adapters... */ { a = &adapter[i]; if(a->request) { if(a->flag_dynamic_l1_down) /* remove adapter from L1 tristate (Huntgroup) */ { if(!appls_found) /* first application does a capi register */ { if((j=get_plci(a))) /* activate L1 of all adapters */ { plci = &a->plci[j-1]; plci->command = 0; add_p(plci,OAD,"\x01\xfd"); add_p(plci,CAI,"\x01\x80"); add_p(plci,UID,"\x06\x43\x61\x70\x69\x32\x30"); add_p(plci,SHIFT|6,NULL); add_p(plci,SIN,"\x02\x00\x00"); plci->internal_command = START_L1_SIG_ASSIGN_PEND; sig_req(plci,ASSIGN,DSIG_ID); add_p(plci,FTY,"\x02\xff\x07"); /* l1 start */ sig_req(plci,SIG_CTRL,0); send_req(plci); } } } } } return false; } /*------------------------------------------------------------------*/ /* Functions for virtual Switching e.g. Transfer by join, Conference */ static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms) { word i; /* Format of vswitch_t: 0 byte length 1 byte VSWITCHIE 2 byte VSWITCH_REQ/VSWITCH_IND 3 byte reserved 4 word VSwitchcommand 6 word returnerror 8... 
Params */ if(!plci || !plci->appl || !plci->State || plci->Sig.Ind==NCR_FACILITY ) return; for(i=0;i<MAX_MULTI_IE;i++) { if(!parms[i][0]) continue; if(parms[i][0]<7) { parms[i][0]=0; /* kill it */ continue; } dbug(1,dprintf("VSwitchReqInd(%d)",parms[i][4])); switch(parms[i][4]) { case VSJOIN: if(!plci->relatedPTYPLCI || (plci->ptyState!=S_ECT && plci->relatedPTYPLCI->ptyState!=S_ECT)) { /* Error */ break; } /* remember all necessary informations */ if(parms[i][0]!=11 || parms[i][8]!=3) /* Length Test */ { break; } if(parms[i][2]==VSWITCH_IND && parms[i][9]==1) { /* first indication after ECT-Request on Consultation Call */ plci->vswitchstate=parms[i][9]; parms[i][9]=2; /* State */ /* now ask first Call to join */ } else if(parms[i][2]==VSWITCH_REQ && parms[i][9]==3) { /* Answer of VSWITCH_REQ from first Call */ plci->vswitchstate=parms[i][9]; /* tell consultation call to join and the protocol capabilities of the first call */ } else { /* Error */ break; } plci->vsprot=parms[i][10]; /* protocol */ plci->vsprotdialect=parms[i][11]; /* protocoldialect */ /* send join request to related PLCI */ parms[i][1]=VSWITCHIE; parms[i][2]=VSWITCH_REQ; plci->relatedPTYPLCI->command = 0; plci->relatedPTYPLCI->internal_command = VSWITCH_REQ_PEND; add_p(plci->relatedPTYPLCI,ESC,&parms[i][0]); sig_req(plci->relatedPTYPLCI,VSWITCH_REQ,0); send_req(plci->relatedPTYPLCI); break; case VSTRANSPORT: default: if(plci->relatedPTYPLCI && plci->vswitchstate==3 && plci->relatedPTYPLCI->vswitchstate==3) { add_p(plci->relatedPTYPLCI,ESC,&parms[i][0]); sig_req(plci->relatedPTYPLCI,VSWITCH_REQ,0); send_req(plci->relatedPTYPLCI); } break; } parms[i][0]=0; /* kill it */ } } /*------------------------------------------------------------------*/ static int diva_get_dma_descriptor (PLCI *plci, dword *dma_magic) { ENTITY e; IDI_SYNC_REQ* pReq = (IDI_SYNC_REQ*)&e; if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_RX_DMA)) { return (-1); } pReq->xdi_dma_descriptor_operation.Req = 0; 
pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION; pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC; pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1; pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL; pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0; e.user[0] = plci->adapter->Id - 1; plci->adapter->request((ENTITY*)pReq); if (!pReq->xdi_dma_descriptor_operation.info.operation && (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) && pReq->xdi_dma_descriptor_operation.info.descriptor_magic) { *dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic; dbug(3,dprintf("dma_alloc, a:%d (%d-%08x)", plci->adapter->Id, pReq->xdi_dma_descriptor_operation.info.descriptor_number, *dma_magic)); return (pReq->xdi_dma_descriptor_operation.info.descriptor_number); } else { dbug(1,dprintf("dma_alloc failed")); return (-1); } } static void diva_free_dma_descriptor (PLCI *plci, int nr) { ENTITY e; IDI_SYNC_REQ* pReq = (IDI_SYNC_REQ*)&e; if (nr < 0) { return; } pReq->xdi_dma_descriptor_operation.Req = 0; pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION; pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE; pReq->xdi_dma_descriptor_operation.info.descriptor_number = nr; pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL; pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0; e.user[0] = plci->adapter->Id - 1; plci->adapter->request((ENTITY*)pReq); if (!pReq->xdi_dma_descriptor_operation.info.operation) { dbug(1,dprintf("dma_free(%d)", nr)); } else { dbug(1,dprintf("dma_free failed (%d)", nr)); } } /*------------------------------------------------------------------*/
gpl-2.0
AnupBansod/linux-btrfs
drivers/power/pcf50633-charger.c
2085
12714
/* NXP PCF50633 Main Battery Charger Driver * * (C) 2006-2008 by Openmoko, Inc. * Author: Balaji Rao <balajirrao@openmoko.org> * All rights reserved. * * Broken down from monstrous PCF50633 driver mainly by * Harald Welte, Andy Green and Werner Almesberger * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/mfd/pcf50633/core.h> #include <linux/mfd/pcf50633/mbc.h> struct pcf50633_mbc { struct pcf50633 *pcf; int adapter_online; int usb_online; struct power_supply usb; struct power_supply adapter; struct power_supply ac; }; int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int ret = 0; u8 bits; int charging_start = 1; u8 mbcs2, chgmod; unsigned int mbcc5; if (ma >= 1000) { bits = PCF50633_MBCC7_USB_1000mA; ma = 1000; } else if (ma >= 500) { bits = PCF50633_MBCC7_USB_500mA; ma = 500; } else if (ma >= 100) { bits = PCF50633_MBCC7_USB_100mA; ma = 100; } else { bits = PCF50633_MBCC7_USB_SUSPEND; charging_start = 0; ma = 0; } ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, PCF50633_MBCC7_USB_MASK, bits); if (ret) dev_err(pcf->dev, "error setting usb curlim to %d mA\n", ma); else dev_info(pcf->dev, "usb curlim to %d mA\n", ma); /* * We limit the charging current to be the USB current limit. * The reason is that on pcf50633, when it enters PMU Standby mode, * which it does when the device goes "off", the USB current limit * reverts to the variant default. In at least one common case, that * default is 500mA. 
By setting the charging current to be the same * as the USB limit we set here before PMU standby, we enforce it only * using the correct amount of current even when the USB current limit * gets reset to the wrong thing */ if (mbc->pcf->pdata->charger_reference_current_ma) { mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; if (mbcc5 > 255) mbcc5 = 255; pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); } mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); /* If chgmod == BATFULL, setting chgena has no effect. * Datasheet says we need to set resume instead but when autoresume is * used resume doesn't work. Clear and set chgena instead. */ if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); else { pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_CHGENA); pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); } power_supply_changed(&mbc->usb); return ret; } EXPORT_SYMBOL_GPL(pcf50633_mbc_usb_curlim_set); int pcf50633_mbc_get_status(struct pcf50633 *pcf) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int status = 0; u8 chgmod; if (!mbc) return 0; chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2) & PCF50633_MBCS2_MBC_MASK; if (mbc->usb_online) status |= PCF50633_MBC_USB_ONLINE; if (chgmod == PCF50633_MBCS2_MBC_USB_PRE || chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT || chgmod == PCF50633_MBCS2_MBC_USB_FAST || chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT) status |= PCF50633_MBC_USB_ACTIVE; if (mbc->adapter_online) status |= PCF50633_MBC_ADAPTER_ONLINE; if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE || chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT || chgmod == PCF50633_MBCS2_MBC_ADP_FAST || chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT) status |= PCF50633_MBC_ADAPTER_ACTIVE; return status; } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); int 
pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf) { struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); if (!mbc) return 0; return mbc->usb_online; } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status); static ssize_t show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); u8 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); u8 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); return sprintf(buf, "%d\n", chgmod); } static DEVICE_ATTR(chgmode, S_IRUGO, show_chgmode, NULL); static ssize_t show_usblim(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & PCF50633_MBCC7_USB_MASK; unsigned int ma; if (usblim == PCF50633_MBCC7_USB_1000mA) ma = 1000; else if (usblim == PCF50633_MBCC7_USB_500mA) ma = 500; else if (usblim == PCF50633_MBCC7_USB_100mA) ma = 100; else ma = 0; return sprintf(buf, "%u\n", ma); } static ssize_t set_usblim(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); unsigned long ma; int ret; ret = strict_strtoul(buf, 10, &ma); if (ret) return -EINVAL; pcf50633_mbc_usb_curlim_set(mbc->pcf, ma); return count; } static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim); static ssize_t show_chglim(struct device *dev, struct device_attribute *attr, char *buf) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5); unsigned int ma; if (!mbc->pcf->pdata->charger_reference_current_ma) return -ENODEV; ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8; return sprintf(buf, "%u\n", ma); } static ssize_t set_chglim(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcf50633_mbc *mbc = dev_get_drvdata(dev); unsigned long ma; unsigned int mbcc5; int ret; 
if (!mbc->pcf->pdata->charger_reference_current_ma) return -ENODEV; ret = strict_strtoul(buf, 10, &ma); if (ret) return -EINVAL; mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma; if (mbcc5 > 255) mbcc5 = 255; pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5); return count; } /* * This attribute allows to change MBC charging limit on the fly * independently of usb current limit. It also gets set automatically every * time usb current limit is changed. */ static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim); static struct attribute *pcf50633_mbc_sysfs_entries[] = { &dev_attr_chgmode.attr, &dev_attr_usb_curlim.attr, &dev_attr_chg_curlim.attr, NULL, }; static struct attribute_group mbc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcf50633_mbc_sysfs_entries, }; static void pcf50633_mbc_irq_handler(int irq, void *data) { struct pcf50633_mbc *mbc = data; /* USB */ if (irq == PCF50633_IRQ_USBINS) { mbc->usb_online = 1; } else if (irq == PCF50633_IRQ_USBREM) { mbc->usb_online = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); } /* Adapter */ if (irq == PCF50633_IRQ_ADPINS) mbc->adapter_online = 1; else if (irq == PCF50633_IRQ_ADPREM) mbc->adapter_online = 0; power_supply_changed(&mbc->ac); power_supply_changed(&mbc->usb); power_supply_changed(&mbc->adapter); if (mbc->pcf->pdata->mbc_event_callback) mbc->pcf->pdata->mbc_event_callback(mbc->pcf, irq); } static int adapter_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, adapter); int ret = 0; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = mbc->adapter_online; break; default: ret = -EINVAL; break; } return ret; } static int usb_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb); int ret = 0; u8 usblim = 
pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & PCF50633_MBCC7_USB_MASK; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = mbc->usb_online && (usblim <= PCF50633_MBCC7_USB_500mA); break; default: ret = -EINVAL; break; } return ret; } static int ac_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, ac); int ret = 0; u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) & PCF50633_MBCC7_USB_MASK; switch (psp) { case POWER_SUPPLY_PROP_ONLINE: val->intval = mbc->usb_online && (usblim == PCF50633_MBCC7_USB_1000mA); break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property power_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static const u8 mbc_irq_handlers[] = { PCF50633_IRQ_ADPINS, PCF50633_IRQ_ADPREM, PCF50633_IRQ_USBINS, PCF50633_IRQ_USBREM, PCF50633_IRQ_BATFULL, PCF50633_IRQ_CHGHALT, PCF50633_IRQ_THLIMON, PCF50633_IRQ_THLIMOFF, PCF50633_IRQ_USBLIMON, PCF50633_IRQ_USBLIMOFF, PCF50633_IRQ_LOWSYS, PCF50633_IRQ_LOWBAT, }; static int pcf50633_mbc_probe(struct platform_device *pdev) { struct pcf50633_mbc *mbc; int ret; int i; u8 mbcs1; mbc = devm_kzalloc(&pdev->dev, sizeof(*mbc), GFP_KERNEL); if (!mbc) return -ENOMEM; platform_set_drvdata(pdev, mbc); mbc->pcf = dev_to_pcf50633(pdev->dev.parent); /* Set up IRQ handlers */ for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++) pcf50633_register_irq(mbc->pcf, mbc_irq_handlers[i], pcf50633_mbc_irq_handler, mbc); /* Create power supplies */ mbc->adapter.name = "adapter"; mbc->adapter.type = POWER_SUPPLY_TYPE_MAINS; mbc->adapter.properties = power_props; mbc->adapter.num_properties = ARRAY_SIZE(power_props); mbc->adapter.get_property = &adapter_get_property; mbc->adapter.supplied_to = mbc->pcf->pdata->batteries; mbc->adapter.num_supplicants = mbc->pcf->pdata->num_batteries; mbc->usb.name = "usb"; mbc->usb.type = POWER_SUPPLY_TYPE_USB; mbc->usb.properties = power_props; 
mbc->usb.num_properties = ARRAY_SIZE(power_props); mbc->usb.get_property = usb_get_property; mbc->usb.supplied_to = mbc->pcf->pdata->batteries; mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries; mbc->ac.name = "ac"; mbc->ac.type = POWER_SUPPLY_TYPE_MAINS; mbc->ac.properties = power_props; mbc->ac.num_properties = ARRAY_SIZE(power_props); mbc->ac.get_property = ac_get_property; mbc->ac.supplied_to = mbc->pcf->pdata->batteries; mbc->ac.num_supplicants = mbc->pcf->pdata->num_batteries; ret = power_supply_register(&pdev->dev, &mbc->adapter); if (ret) { dev_err(mbc->pcf->dev, "failed to register adapter\n"); return ret; } ret = power_supply_register(&pdev->dev, &mbc->usb); if (ret) { dev_err(mbc->pcf->dev, "failed to register usb\n"); power_supply_unregister(&mbc->adapter); return ret; } ret = power_supply_register(&pdev->dev, &mbc->ac); if (ret) { dev_err(mbc->pcf->dev, "failed to register ac\n"); power_supply_unregister(&mbc->adapter); power_supply_unregister(&mbc->usb); return ret; } ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); if (ret) dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); mbcs1 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS1); if (mbcs1 & PCF50633_MBCS1_USBPRES) pcf50633_mbc_irq_handler(PCF50633_IRQ_USBINS, mbc); if (mbcs1 & PCF50633_MBCS1_ADAPTPRES) pcf50633_mbc_irq_handler(PCF50633_IRQ_ADPINS, mbc); return 0; } static int pcf50633_mbc_remove(struct platform_device *pdev) { struct pcf50633_mbc *mbc = platform_get_drvdata(pdev); int i; /* Remove IRQ handlers */ for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++) pcf50633_free_irq(mbc->pcf, mbc_irq_handlers[i]); sysfs_remove_group(&pdev->dev.kobj, &mbc_attr_group); power_supply_unregister(&mbc->usb); power_supply_unregister(&mbc->adapter); power_supply_unregister(&mbc->ac); return 0; } static struct platform_driver pcf50633_mbc_driver = { .driver = { .name = "pcf50633-mbc", }, .probe = pcf50633_mbc_probe, .remove = pcf50633_mbc_remove, }; 
module_platform_driver(pcf50633_mbc_driver); MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>"); MODULE_DESCRIPTION("PCF50633 mbc driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pcf50633-mbc");
gpl-2.0
acheron1502/android_kernel_BLU_BLU_PURE_XL
drivers/pwm/pwm-mxs.c
2085
4839
/* * Copyright 2012 Freescale Semiconductor, Inc. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> #include <linux/stmp_device.h> #define SET 0x4 #define CLR 0x8 #define TOG 0xc #define PWM_CTRL 0x0 #define PWM_ACTIVE0 0x10 #define PWM_PERIOD0 0x20 #define PERIOD_PERIOD(p) ((p) & 0xffff) #define PERIOD_PERIOD_MAX 0x10000 #define PERIOD_ACTIVE_HIGH (3 << 16) #define PERIOD_INACTIVE_LOW (2 << 18) #define PERIOD_CDIV(div) (((div) & 0x7) << 20) #define PERIOD_CDIV_MAX 8 struct mxs_pwm_chip { struct pwm_chip chip; struct clk *clk; void __iomem *base; }; #define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip) static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); int ret, div = 0; unsigned int period_cycles, duty_cycles; unsigned long rate; unsigned long long c; rate = clk_get_rate(mxs->clk); while (1) { c = rate / (1 << div); c = c * period_ns; do_div(c, 1000000000); if (c < PERIOD_PERIOD_MAX) break; div++; if (div > PERIOD_CDIV_MAX) return -EINVAL; } period_cycles = c; c *= duty_ns; do_div(c, period_ns); duty_cycles = c; /* * If the PWM channel is disabled, make sure to turn on the clock * before writing the register. Otherwise, keep it enabled. 
*/ if (!test_bit(PWMF_ENABLED, &pwm->flags)) { ret = clk_prepare_enable(mxs->clk); if (ret) return ret; } writel(duty_cycles << 16, mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20); writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH | PERIOD_INACTIVE_LOW | PERIOD_CDIV(div), mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20); /* * If the PWM is not enabled, turn the clock off again to save power. */ if (!test_bit(PWMF_ENABLED, &pwm->flags)) clk_disable_unprepare(mxs->clk); return 0; } static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); int ret; ret = clk_prepare_enable(mxs->clk); if (ret) return ret; writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET); return 0; } static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR); clk_disable_unprepare(mxs->clk); } static const struct pwm_ops mxs_pwm_ops = { .config = mxs_pwm_config, .enable = mxs_pwm_enable, .disable = mxs_pwm_disable, .owner = THIS_MODULE, }; static int mxs_pwm_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mxs_pwm_chip *mxs; struct resource *res; struct pinctrl *pinctrl; int ret; mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL); if (!mxs) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mxs->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mxs->base)) return PTR_ERR(mxs->base); pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) return PTR_ERR(pinctrl); mxs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mxs->clk)) return PTR_ERR(mxs->clk); mxs->chip.dev = &pdev->dev; mxs->chip.ops = &mxs_pwm_ops; mxs->chip.base = -1; ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm); if (ret < 0) { dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret); return ret; } ret = pwmchip_add(&mxs->chip); if (ret < 0) 
{ dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret); return ret; } platform_set_drvdata(pdev, mxs); stmp_reset_block(mxs->base); return 0; } static int mxs_pwm_remove(struct platform_device *pdev) { struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev); return pwmchip_remove(&mxs->chip); } static const struct of_device_id mxs_pwm_dt_ids[] = { { .compatible = "fsl,imx23-pwm", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_pwm_dt_ids); static struct platform_driver mxs_pwm_driver = { .driver = { .name = "mxs-pwm", .of_match_table = of_match_ptr(mxs_pwm_dt_ids), }, .probe = mxs_pwm_probe, .remove = mxs_pwm_remove, }; module_platform_driver(mxs_pwm_driver); MODULE_ALIAS("platform:mxs-pwm"); MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); MODULE_DESCRIPTION("Freescale MXS PWM Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
MoKee/android_kernel_lge_omap4-common
drivers/staging/iio/adc/ad7314.c
2341
6209
/* * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302 * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/spi/spi.h> #include "../iio.h" #include "../sysfs.h" /* * AD7314 power mode */ #define AD7314_PD 0x2000 /* * AD7314 temperature masks */ #define AD7314_TEMP_SIGN 0x200 #define AD7314_TEMP_MASK 0x7FE0 #define AD7314_TEMP_OFFSET 5 #define AD7314_TEMP_FLOAT_OFFSET 2 #define AD7314_TEMP_FLOAT_MASK 0x3 /* * ADT7301 and ADT7302 temperature masks */ #define ADT7301_TEMP_SIGN 0x2000 #define ADT7301_TEMP_MASK 0x2FFF #define ADT7301_TEMP_FLOAT_OFFSET 5 #define ADT7301_TEMP_FLOAT_MASK 0x1F /* * struct ad7314_chip_info - chip specifc information */ struct ad7314_chip_info { struct spi_device *spi_dev; struct iio_dev *indio_dev; s64 last_timestamp; u8 mode; }; /* * ad7314 register access by SPI */ static int ad7314_spi_read(struct ad7314_chip_info *chip, u16 *data) { struct spi_device *spi_dev = chip->spi_dev; int ret = 0; u16 value; ret = spi_read(spi_dev, (u8 *)&value, sizeof(value)); if (ret < 0) { dev_err(&spi_dev->dev, "SPI read error\n"); return ret; } *data = be16_to_cpu((u16)value); return ret; } static int ad7314_spi_write(struct ad7314_chip_info *chip, u16 data) { struct spi_device *spi_dev = chip->spi_dev; int ret = 0; u16 value = cpu_to_be16(data); ret = spi_write(spi_dev, (u8 *)&value, sizeof(value)); if (ret < 0) dev_err(&spi_dev->dev, "SPI write error\n"); return ret; } static ssize_t ad7314_show_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct ad7314_chip_info *chip = dev_info->dev_data; if (chip->mode) return sprintf(buf, "power-save\n"); else return sprintf(buf, "full\n"); } static ssize_t ad7314_store_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = 
dev_get_drvdata(dev); struct ad7314_chip_info *chip = dev_info->dev_data; u16 mode = 0; int ret; if (!strcmp(buf, "full")) mode = AD7314_PD; ret = ad7314_spi_write(chip, mode); if (ret) return -EIO; chip->mode = mode; return len; } static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, ad7314_show_mode, ad7314_store_mode, 0); static ssize_t ad7314_show_available_modes(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "full\npower-save\n"); } static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7314_show_available_modes, NULL, 0); static ssize_t ad7314_show_temperature(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct ad7314_chip_info *chip = dev_info->dev_data; u16 data; char sign = ' '; int ret; if (chip->mode) { ret = ad7314_spi_write(chip, 0); if (ret) return -EIO; } ret = ad7314_spi_read(chip, &data); if (ret) return -EIO; if (chip->mode) ad7314_spi_write(chip, chip->mode); if (strcmp(dev_info->name, "ad7314")) { data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; if (data & AD7314_TEMP_SIGN) { data = (AD7314_TEMP_SIGN << 1) - data; sign = '-'; } return sprintf(buf, "%c%d.%.2d\n", sign, data >> AD7314_TEMP_FLOAT_OFFSET, (data & AD7314_TEMP_FLOAT_MASK) * 25); } else { data &= ADT7301_TEMP_MASK; if (data & ADT7301_TEMP_SIGN) { data = (ADT7301_TEMP_SIGN << 1) - data; sign = '-'; } return sprintf(buf, "%c%d.%.5d\n", sign, data >> ADT7301_TEMP_FLOAT_OFFSET, (data & ADT7301_TEMP_FLOAT_MASK) * 3125); } } static IIO_DEVICE_ATTR(temperature, S_IRUGO, ad7314_show_temperature, NULL, 0); static struct attribute *ad7314_attributes[] = { &iio_dev_attr_available_modes.dev_attr.attr, &iio_dev_attr_mode.dev_attr.attr, &iio_dev_attr_temperature.dev_attr.attr, NULL, }; static const struct attribute_group ad7314_attribute_group = { .attrs = ad7314_attributes, }; static const struct iio_info ad7314_info = { .attrs = &ad7314_attribute_group, .driver_module = THIS_MODULE, }; /* * device 
probe and remove */ static int __devinit ad7314_probe(struct spi_device *spi_dev) { struct ad7314_chip_info *chip; int ret = 0; chip = kzalloc(sizeof(struct ad7314_chip_info), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* this is only used for device removal purposes */ dev_set_drvdata(&spi_dev->dev, chip); chip->spi_dev = spi_dev; chip->indio_dev = iio_allocate_device(0); if (chip->indio_dev == NULL) { ret = -ENOMEM; goto error_free_chip; } chip->indio_dev->name = spi_get_device_id(spi_dev)->name; chip->indio_dev->dev.parent = &spi_dev->dev; chip->indio_dev->info = &ad7314_info; chip->indio_dev->dev_data = (void *)chip; ret = iio_device_register(chip->indio_dev); if (ret) goto error_free_dev; dev_info(&spi_dev->dev, "%s temperature sensor registered.\n", chip->indio_dev->name); return 0; error_free_dev: iio_free_device(chip->indio_dev); error_free_chip: kfree(chip); return ret; } static int __devexit ad7314_remove(struct spi_device *spi_dev) { struct ad7314_chip_info *chip = dev_get_drvdata(&spi_dev->dev); struct iio_dev *indio_dev = chip->indio_dev; dev_set_drvdata(&spi_dev->dev, NULL); iio_device_unregister(indio_dev); iio_free_device(chip->indio_dev); kfree(chip); return 0; } static const struct spi_device_id ad7314_id[] = { { "adt7301", 0 }, { "adt7302", 0 }, { "ad7314", 0 }, {} }; static struct spi_driver ad7314_driver = { .driver = { .name = "ad7314", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = ad7314_probe, .remove = __devexit_p(ad7314_remove), .id_table = ad7314_id, }; static __init int ad7314_init(void) { return spi_register_driver(&ad7314_driver); } static __exit void ad7314_exit(void) { spi_unregister_driver(&ad7314_driver); } MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital" " temperature sensor driver"); MODULE_LICENSE("GPL v2"); module_init(ad7314_init); module_exit(ad7314_exit);
gpl-2.0
warped-rudi/linux-sunxi
arch/arm/mach-rpc/irq.c
2341
3503
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>

#include <asm/mach/irq.h>
#include <asm/hardware/iomd.h>
#include <asm/irq.h>
#include <asm/fiq.h>

/*
 * Acknowledge an IRQ on the IOMD "A" bank: drop its mask bit and then
 * write the bit to the clear register.
 */
static void iomd_ack_irq_a(struct irq_data *d)
{
	unsigned int bit = 1 << d->irq;

	iomd_writeb(iomd_readb(IOMD_IRQMASKA) & ~bit, IOMD_IRQMASKA);
	iomd_writeb(bit, IOMD_IRQCLRA);
}

/* Disable an "A" bank interrupt by clearing its mask-register bit. */
static void iomd_mask_irq_a(struct irq_data *d)
{
	unsigned int bit = 1 << d->irq;

	iomd_writeb(iomd_readb(IOMD_IRQMASKA) & ~bit, IOMD_IRQMASKA);
}

/* Enable an "A" bank interrupt by setting its mask-register bit. */
static void iomd_unmask_irq_a(struct irq_data *d)
{
	unsigned int bit = 1 << d->irq;

	iomd_writeb(iomd_readb(IOMD_IRQMASKA) | bit, IOMD_IRQMASKA);
}

static struct irq_chip iomd_a_chip = {
	.irq_ack	= iomd_ack_irq_a,
	.irq_mask	= iomd_mask_irq_a,
	.irq_unmask	= iomd_unmask_irq_a,
};

/* "B" bank: irqs 8-15, bit position is the low three bits of the irq. */
static void iomd_mask_irq_b(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_IRQMASKB) & ~bit, IOMD_IRQMASKB);
}

static void iomd_unmask_irq_b(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_IRQMASKB) | bit, IOMD_IRQMASKB);
}

/* The B bank has no dedicated clear register, so ack just masks. */
static struct irq_chip iomd_b_chip = {
	.irq_ack	= iomd_mask_irq_b,
	.irq_mask	= iomd_mask_irq_b,
	.irq_unmask	= iomd_unmask_irq_b,
};

/* DMA interrupts: irqs 16-21, masked via IOMD_DMAMASK. */
static void iomd_mask_irq_dma(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_DMAMASK) & ~bit, IOMD_DMAMASK);
}

static void iomd_unmask_irq_dma(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_DMAMASK) | bit, IOMD_DMAMASK);
}

static struct irq_chip iomd_dma_chip = {
	.irq_ack	= iomd_mask_irq_dma,
	.irq_mask	= iomd_mask_irq_dma,
	.irq_unmask	= iomd_unmask_irq_dma,
};

/* FIQ sources: irqs 64-71, masked via IOMD_FIQMASK. */
static void iomd_mask_irq_fiq(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_FIQMASK) & ~bit, IOMD_FIQMASK);
}

static void iomd_unmask_irq_fiq(struct irq_data *d)
{
	unsigned int bit = 1 << (d->irq & 7);

	iomd_writeb(iomd_readb(IOMD_FIQMASK) | bit, IOMD_FIQMASK);
}

static struct irq_chip iomd_fiq_chip = {
	.irq_ack	= iomd_mask_irq_fiq,
	.irq_mask	= iomd_mask_irq_fiq,
	.irq_unmask	= iomd_unmask_irq_fiq,
};

extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end;

/*
 * rpc_init_irq - mask all IOMD interrupt sources, install the default FIQ
 * handler, and attach the appropriate irq_chip/flow handler to every irq.
 */
void __init rpc_init_irq(void)
{
	unsigned int irq;

	/* Silence every source before any handler is installed. */
	iomd_writeb(0, IOMD_IRQMASKA);
	iomd_writeb(0, IOMD_IRQMASKB);
	iomd_writeb(0, IOMD_FIQMASK);
	iomd_writeb(0, IOMD_DMAMASK);

	set_fiq_handler(&rpc_default_fiq_start,
		&rpc_default_fiq_end - &rpc_default_fiq_start);

	for (irq = 0; irq < NR_IRQS; irq++) {
		unsigned int flags = IRQF_VALID;

		/* These lines are safe to autoprobe. */
		if (irq <= 6 || (irq >= 9 && irq <= 15))
			flags |= IRQF_PROBE;

		/* DMA lines, irq 21 and the keyboard TX line must not be
		 * enabled automatically at request time. */
		if (irq == 21 || (irq >= 16 && irq <= 19) ||
		    irq == IRQ_KEYBOARDTX)
			flags |= IRQF_NOAUTOEN;

		if (irq <= 7) {
			irq_set_chip_and_handler(irq, &iomd_a_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
		} else if (irq >= 8 && irq <= 15) {
			irq_set_chip_and_handler(irq, &iomd_b_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
		} else if (irq >= 16 && irq <= 21) {
			irq_set_chip_and_handler(irq, &iomd_dma_chip,
						 handle_level_irq);
			set_irq_flags(irq, flags);
		} else if (irq >= 64 && irq <= 71) {
			/* FIQs get a chip but no flow handler, and are
			 * always valid regardless of the flags above. */
			irq_set_chip(irq, &iomd_fiq_chip);
			set_irq_flags(irq, IRQF_VALID);
		}
	}

	init_FIQ();
}
gpl-2.0
omkar062/linux-4.0.4
arch/arm/mach-ep93xx/micro9.c
4645
3566
/* * linux/arch/arm/mach-ep93xx/micro9.c * * Copyright (C) 2006 Contec Steuerungstechnik & Automation GmbH * Manfred Gruber <m.gruber@tirol.com> * Copyright (C) 2009 Contec Steuerungstechnik & Automation GmbH * Hubert Feurstein <hubert.feurstein@contec.at> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" /************************************************************************* * Micro9 NOR Flash * * Micro9-High has up to 64MB of 32-bit flash on CS1 * Micro9-Mid has up to 64MB of either 32-bit or 16-bit flash on CS1 * Micro9-Lite uses a separate MTD map driver for flash support * Micro9-Slim has up to 64MB of either 32-bit or 16-bit flash on CS1 *************************************************************************/ static unsigned int __init micro9_detect_bootwidth(void) { u32 v; /* Detect the bus width of the external flash memory */ v = __raw_readl(EP93XX_SYSCON_SYSCFG); if (v & EP93XX_SYSCON_SYSCFG_LCSN7) return 4; /* 32-bit */ else return 2; /* 16-bit */ } static void __init micro9_register_flash(void) { unsigned int width; if (machine_is_micro9()) width = 4; else if (machine_is_micro9m() || machine_is_micro9s()) width = micro9_detect_bootwidth(); else width = 0; if (width) ep93xx_register_flash(width, EP93XX_CS1_PHYS_BASE, SZ_64M); } /************************************************************************* * Micro9 Ethernet *************************************************************************/ static struct ep93xx_eth_data __initdata micro9_eth_data = { .phy_id = 0x1f, }; static void __init micro9_init_machine(void) { ep93xx_init_devices(); ep93xx_register_eth(&micro9_eth_data, 1); micro9_register_flash(); } 
/*
 * Board descriptors: one MACHINE_START/MACHINE_END record per Micro9
 * variant, all sharing the same ep93xx map/irq/timer hooks and the
 * common micro9_init_machine() above.  Each is compiled in only when
 * its CONFIG_MACH_MICRO9* option is selected.
 */
#ifdef CONFIG_MACH_MICRO9H
MACHINE_START(MICRO9, "Contec Micro9-High")
	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.init_time	= ep93xx_timer_init,
	.init_machine	= micro9_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MICRO9M
MACHINE_START(MICRO9M, "Contec Micro9-Mid")
	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.init_time	= ep93xx_timer_init,
	.init_machine	= micro9_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MICRO9L
MACHINE_START(MICRO9L, "Contec Micro9-Lite")
	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.init_time	= ep93xx_timer_init,
	.init_machine	= micro9_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MICRO9S
MACHINE_START(MICRO9S, "Contec Micro9-Slim")
	/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
	.atag_offset	= 0x100,
	.map_io		= ep93xx_map_io,
	.init_irq	= ep93xx_init_irq,
	.init_time	= ep93xx_timer_init,
	.init_machine	= micro9_init_machine,
	.init_late	= ep93xx_init_late,
	.restart	= ep93xx_restart,
MACHINE_END
#endif
gpl-2.0
GeeteshKhatavkar/gh0st_kernel_samsung_royxx
drivers/scsi/device_handler/scsi_dh_rdac.c
4901
23296
/* * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler * * Copyright (C) 2005 Mike Christie. All rights reserved. * Copyright (C) Chandra Seetharaman, IBM Corp. 2007 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/module.h> #define RDAC_NAME "rdac" #define RDAC_RETRY_COUNT 5 /* * LSI mode page stuff * * These struct definitions and the forming of the * mode page were taken from the LSI RDAC 2.4 GPL'd * driver, and then converted to Linux conventions. 
*/ #define RDAC_QUIESCENCE_TIME 20 /* * Page Codes */ #define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c /* * Controller modes definitions */ #define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02 /* * RDAC Options field */ #define RDAC_FORCED_QUIESENCE 0x02 #define RDAC_TIMEOUT (60 * HZ) #define RDAC_RETRIES 3 struct rdac_mode_6_hdr { u8 data_len; u8 medium_type; u8 device_params; u8 block_desc_len; }; struct rdac_mode_10_hdr { u16 data_len; u8 medium_type; u8 device_params; u16 reserved; u16 block_desc_len; }; struct rdac_mode_common { u8 controller_serial[16]; u8 alt_controller_serial[16]; u8 rdac_mode[2]; u8 alt_rdac_mode[2]; u8 quiescence_timeout; u8 rdac_options; }; struct rdac_pg_legacy { struct rdac_mode_6_hdr hdr; u8 page_code; u8 page_len; struct rdac_mode_common common; #define MODE6_MAX_LUN 32 u8 lun_table[MODE6_MAX_LUN]; u8 reserved2[32]; u8 reserved3; u8 reserved4; }; struct rdac_pg_expanded { struct rdac_mode_10_hdr hdr; u8 page_code; u8 subpage_code; u8 page_len[2]; struct rdac_mode_common common; u8 lun_table[256]; u8 reserved3; u8 reserved4; }; struct c9_inquiry { u8 peripheral_info; u8 page_code; /* 0xC9 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "vace" */ u8 avte_cvp; u8 path_prio; u8 reserved2[38]; }; #define SUBSYS_ID_LEN 16 #define SLOT_ID_LEN 2 #define ARRAY_LABEL_LEN 31 struct c4_inquiry { u8 peripheral_info; u8 page_code; /* 0xC4 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "subs" */ u8 subsys_id[SUBSYS_ID_LEN]; u8 revision[4]; u8 slot_id[SLOT_ID_LEN]; u8 reserved[2]; }; #define UNIQUE_ID_LEN 16 struct c8_inquiry { u8 peripheral_info; u8 page_code; /* 0xC8 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "edid" */ u8 reserved2[3]; u8 vol_uniq_id_len; u8 vol_uniq_id[16]; u8 vol_user_label_len; u8 vol_user_label[60]; u8 array_uniq_id_len; u8 array_unique_id[UNIQUE_ID_LEN]; u8 array_user_label_len; u8 array_user_label[60]; u8 lun[8]; }; struct rdac_controller { u8 array_id[UNIQUE_ID_LEN]; int use_ms10; struct kref kref; struct list_head node; /* 
list of all controllers */ union { struct rdac_pg_legacy legacy; struct rdac_pg_expanded expanded; } mode_select; u8 index; u8 array_name[ARRAY_LABEL_LEN]; struct Scsi_Host *host; spinlock_t ms_lock; int ms_queued; struct work_struct ms_work; struct scsi_device *ms_sdev; struct list_head ms_head; }; struct c2_inquiry { u8 peripheral_info; u8 page_code; /* 0xC2 */ u8 reserved1; u8 page_len; u8 page_id[4]; /* "swr4" */ u8 sw_version[3]; u8 sw_date[3]; u8 features_enabled; u8 max_lun_supported; u8 partitions[239]; /* Total allocation length should be 0xFF */ }; struct rdac_dh_data { struct rdac_controller *ctlr; #define UNINITIALIZED_LUN (1 << 8) unsigned lun; #define RDAC_MODE 0 #define RDAC_MODE_AVT 1 #define RDAC_MODE_IOSHIP 2 unsigned char mode; #define RDAC_STATE_ACTIVE 0 #define RDAC_STATE_PASSIVE 1 unsigned char state; #define RDAC_LUN_UNOWNED 0 #define RDAC_LUN_OWNED 1 char lun_state; #define RDAC_PREFERRED 0 #define RDAC_NON_PREFERRED 1 char preferred; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; union { struct c2_inquiry c2; struct c4_inquiry c4; struct c8_inquiry c8; struct c9_inquiry c9; } inq; }; static const char *mode[] = { "RDAC", "AVT", "IOSHIP", }; static const char *lun_state[] = { "unowned", "owned", }; struct rdac_queue_data { struct list_head entry; struct rdac_dh_data *h; activate_complete callback_fn; void *callback_data; }; static LIST_HEAD(ctlr_list); static DEFINE_SPINLOCK(list_lock); static struct workqueue_struct *kmpath_rdacd; static void send_mode_select(struct work_struct *work); /* * module parameter to enable rdac debug logging. 
* 2 bits for each type of logging, only two types defined for now * Can be enhanced if required at later point */ static int rdac_logging = 1; module_param(rdac_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, " "Default is 1 - failover logging enabled, " "set it to 0xF to enable all the logs"); #define RDAC_LOG_FAILOVER 0 #define RDAC_LOG_SENSE 2 #define RDAC_LOG_BITS 2 #define RDAC_LOG_LEVEL(SHIFT) \ ((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1)) #define RDAC_LOG(SHIFT, sdev, f, arg...) \ do { \ if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \ sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \ } while (0); static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; BUG_ON(scsi_dh_data == NULL); return ((struct rdac_dh_data *) scsi_dh_data->buf); } static struct request *get_rdac_req(struct scsi_device *sdev, void *buffer, unsigned buflen, int rw) { struct request *rq; struct request_queue *q = sdev->request_queue; rq = blk_get_request(q, rw, GFP_NOIO); if (!rq) { sdev_printk(KERN_INFO, sdev, "get_rdac_req: blk_get_request failed.\n"); return NULL; } if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); sdev_printk(KERN_INFO, sdev, "get_rdac_req: blk_rq_map_kern failed.\n"); return NULL; } rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = RDAC_RETRIES; rq->timeout = RDAC_TIMEOUT; return rq; } static struct request *rdac_failover_get(struct scsi_device *sdev, struct rdac_dh_data *h, struct list_head *list) { struct request *rq; struct rdac_mode_common *common; unsigned data_size; struct rdac_queue_data *qdata; u8 *lun_table; if (h->ctlr->use_ms10) { struct rdac_pg_expanded *rdac_pg; data_size = sizeof(struct rdac_pg_expanded); rdac_pg = &h->ctlr->mode_select.expanded; memset(rdac_pg, 0, data_size); common = 
&rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; rdac_pg->subpage_code = 0x1; rdac_pg->page_len[0] = 0x01; rdac_pg->page_len[1] = 0x28; lun_table = rdac_pg->lun_table; } else { struct rdac_pg_legacy *rdac_pg; data_size = sizeof(struct rdac_pg_legacy); rdac_pg = &h->ctlr->mode_select.legacy; memset(rdac_pg, 0, data_size); common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; rdac_pg->page_len = 0x68; lun_table = rdac_pg->lun_table; } common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; common->quiescence_timeout = RDAC_QUIESCENCE_TIME; common->rdac_options = RDAC_FORCED_QUIESENCE; list_for_each_entry(qdata, list, entry) { lun_table[qdata->h->lun] = 0x81; } /* get request for block layer packet command */ rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE); if (!rq) return NULL; /* Prepare the command. */ if (h->ctlr->use_ms10) { rq->cmd[0] = MODE_SELECT_10; rq->cmd[7] = data_size >> 8; rq->cmd[8] = data_size & 0xff; } else { rq->cmd[0] = MODE_SELECT; rq->cmd[4] = data_size; } rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = 0; return rq; } static void release_controller(struct kref *kref) { struct rdac_controller *ctlr; ctlr = container_of(kref, struct rdac_controller, kref); list_del(&ctlr->node); kfree(ctlr); } static struct rdac_controller *get_controller(int index, char *array_name, u8 *array_id, struct scsi_device *sdev) { struct rdac_controller *ctlr, *tmp; list_for_each_entry(tmp, &ctlr_list, node) { if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) && (tmp->index == index) && (tmp->host == sdev->host)) { kref_get(&tmp->kref); return tmp; } } ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC); if (!ctlr) return NULL; /* initialize fields of controller */ memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN); ctlr->index = index; ctlr->host = sdev->host; memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN); 
kref_init(&ctlr->kref); ctlr->use_ms10 = -1; ctlr->ms_queued = 0; ctlr->ms_sdev = NULL; spin_lock_init(&ctlr->ms_lock); INIT_WORK(&ctlr->ms_work, send_mode_select); INIT_LIST_HEAD(&ctlr->ms_head); list_add(&ctlr->node, &ctlr_list); return ctlr; } static int submit_inquiry(struct scsi_device *sdev, int page_code, unsigned int len, struct rdac_dh_data *h) { struct request *rq; struct request_queue *q = sdev->request_queue; int err = SCSI_DH_RES_TEMP_UNAVAIL; rq = get_rdac_req(sdev, &h->inq, len, READ); if (!rq) goto done; /* Prepare the command. */ rq->cmd[0] = INQUIRY; rq->cmd[1] = 1; rq->cmd[2] = page_code; rq->cmd[4] = len; rq->cmd_len = COMMAND_SIZE(INQUIRY); rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = 0; err = blk_execute_rq(q, NULL, rq, 1); if (err == -EIO) err = SCSI_DH_IO; blk_put_request(rq); done: return err; } static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { int err, i; struct c8_inquiry *inqp; err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c8; if (inqp->page_code != 0xc8) return SCSI_DH_NOSYS; if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' || inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd') return SCSI_DH_NOSYS; h->lun = inqp->lun[7]; /* Uses only the last byte */ for(i=0; i<ARRAY_LABEL_LEN-1; ++i) *(array_name+i) = inqp->array_user_label[(2*i)+1]; *(array_name+ARRAY_LABEL_LEN-1) = '\0'; memset(array_id, 0, UNIQUE_ID_LEN); memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len); } return err; } static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h) { int err; struct c9_inquiry *inqp; h->state = RDAC_STATE_ACTIVE; err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c9; /* detect the operating mode */ if ((inqp->avte_cvp >> 5) & 0x1) h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */ else if (inqp->avte_cvp >> 
7) h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */ else h->mode = RDAC_MODE; /* LUN in RDAC mode */ /* Update ownership */ if (inqp->avte_cvp & 0x1) h->lun_state = RDAC_LUN_OWNED; else { h->lun_state = RDAC_LUN_UNOWNED; if (h->mode == RDAC_MODE) h->state = RDAC_STATE_PASSIVE; } /* Update path prio*/ if (inqp->path_prio & 0x1) h->preferred = RDAC_PREFERRED; else h->preferred = RDAC_NON_PREFERRED; } return err; } static int initialize_controller(struct scsi_device *sdev, struct rdac_dh_data *h, char *array_name, u8 *array_id) { int err, index; struct c4_inquiry *inqp; err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c4; /* get the controller index */ if (inqp->slot_id[1] == 0x31) index = 0; else index = 1; spin_lock(&list_lock); h->ctlr = get_controller(index, array_name, array_id, sdev); if (!h->ctlr) err = SCSI_DH_RES_TEMP_UNAVAIL; spin_unlock(&list_lock); } return err; } static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h) { int err; struct c2_inquiry *inqp; err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h); if (err == SCSI_DH_OK) { inqp = &h->inq.c2; /* * If more than MODE6_MAX_LUN luns are supported, use * mode select 10 */ if (inqp->max_lun_supported >= MODE6_MAX_LUN) h->ctlr->use_ms10 = 1; else h->ctlr->use_ms10 = 0; } return err; } static int mode_select_handle_sense(struct scsi_device *sdev, unsigned char *sensebuf) { struct scsi_sense_hdr sense_hdr; int err = SCSI_DH_IO, ret; struct rdac_dh_data *h = get_rdac_data(sdev); ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!ret) goto done; switch (sense_hdr.sense_key) { case NO_SENSE: case ABORTED_COMMAND: case UNIT_ATTENTION: err = SCSI_DH_RETRY; break; case NOT_READY: if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01) /* LUN Not Ready and is in the Process of Becoming * Ready */ err = SCSI_DH_RETRY; break; case ILLEGAL_REQUEST: if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36) /* * 
Command Lock contention */ err = SCSI_DH_RETRY; break; default: break; } RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "MODE_SELECT returned with sense %02x/%02x/%02x", (char *) h->ctlr->array_name, h->ctlr->index, sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); done: return err; } static void send_mode_select(struct work_struct *work) { struct rdac_controller *ctlr = container_of(work, struct rdac_controller, ms_work); struct request *rq; struct scsi_device *sdev = ctlr->ms_sdev; struct rdac_dh_data *h = get_rdac_data(sdev); struct request_queue *q = sdev->request_queue; int err, retry_cnt = RDAC_RETRY_COUNT; struct rdac_queue_data *tmp, *qdata; LIST_HEAD(list); spin_lock(&ctlr->ms_lock); list_splice_init(&ctlr->ms_head, &list); ctlr->ms_queued = 0; ctlr->ms_sdev = NULL; spin_unlock(&ctlr->ms_lock); retry: err = SCSI_DH_RES_TEMP_UNAVAIL; rq = rdac_failover_get(sdev, h, &list); if (!rq) goto done; RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, (retry_cnt == RDAC_RETRY_COUNT) ? 
"queueing" : "retrying"); err = blk_execute_rq(q, NULL, rq, 1); blk_put_request(rq); if (err != SCSI_DH_OK) { err = mode_select_handle_sense(sdev, h->sense); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; } if (err == SCSI_DH_OK) { h->state = RDAC_STATE_ACTIVE; RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "MODE_SELECT completed", (char *) h->ctlr->array_name, h->ctlr->index); } done: list_for_each_entry_safe(qdata, tmp, &list, entry) { list_del(&qdata->entry); if (err == SCSI_DH_OK) qdata->h->state = RDAC_STATE_ACTIVE; if (qdata->callback_fn) qdata->callback_fn(qdata->callback_data, err); kfree(qdata); } return; } static int queue_mode_select(struct scsi_device *sdev, activate_complete fn, void *data) { struct rdac_queue_data *qdata; struct rdac_controller *ctlr; qdata = kzalloc(sizeof(*qdata), GFP_KERNEL); if (!qdata) return SCSI_DH_RETRY; qdata->h = get_rdac_data(sdev); qdata->callback_fn = fn; qdata->callback_data = data; ctlr = qdata->h->ctlr; spin_lock(&ctlr->ms_lock); list_add_tail(&qdata->entry, &ctlr->ms_head); if (!ctlr->ms_queued) { ctlr->ms_queued = 1; ctlr->ms_sdev = sdev; queue_work(kmpath_rdacd, &ctlr->ms_work); } spin_unlock(&ctlr->ms_lock); return SCSI_DH_OK; } static int rdac_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct rdac_dh_data *h = get_rdac_data(sdev); int err = SCSI_DH_OK; int act = 0; err = check_ownership(sdev, h); if (err != SCSI_DH_OK) goto done; switch (h->mode) { case RDAC_MODE: if (h->lun_state == RDAC_LUN_UNOWNED) act = 1; break; case RDAC_MODE_IOSHIP: if ((h->lun_state == RDAC_LUN_UNOWNED) && (h->preferred == RDAC_PREFERRED)) act = 1; break; default: break; } if (act) { err = queue_mode_select(sdev, fn, data); if (err == SCSI_DH_OK) return 0; } done: if (fn) fn(data, err); return 0; } static int rdac_prep_fn(struct scsi_device *sdev, struct request *req) { struct rdac_dh_data *h = get_rdac_data(sdev); int ret = BLKPREP_OK; if (h->state != RDAC_STATE_ACTIVE) { ret = BLKPREP_KILL; 
req->cmd_flags |= REQ_QUIET; } return ret; } static int rdac_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { struct rdac_dh_data *h = get_rdac_data(sdev); RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, " "I/O returned with sense %02x/%02x/%02x", (char *) h->ctlr->array_name, h->ctlr->index, sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq); switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) /* LUN Not Ready - Logical Unit Not Ready and is in * the process of becoming ready * Just retry. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) /* LUN Not Ready - Storage firmware incompatible * Manual code synchonisation required. * * Nothing we can do here. Try to bypass the path. */ return SUCCESS; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1) /* LUN Not Ready - Quiescense in progress * * Just retry and wait. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02) /* LUN Not Ready - Quiescense in progress * or has been achieved * Just retry. */ return ADD_TO_MLQUEUE; break; case ILLEGAL_REQUEST: if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) { /* Invalid Request - Current Logical Unit Ownership. * Controller is not the current owner of the LUN, * Fail the path, so that the other path be used. */ h->state = RDAC_STATE_PASSIVE; return SUCCESS; } break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) /* * Power On, Reset, or Bus Device Reset, just retry. */ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02) /* * Quiescence in progress , just retry. 
*/ return ADD_TO_MLQUEUE; break; } /* success just means we do not care what scsi-ml does */ return SCSI_RETURN_NOT_HANDLED; } static const struct scsi_dh_devlist rdac_dev_list[] = { {"IBM", "1722"}, {"IBM", "1724"}, {"IBM", "1726"}, {"IBM", "1742"}, {"IBM", "1745"}, {"IBM", "1746"}, {"IBM", "1814"}, {"IBM", "1815"}, {"IBM", "1818"}, {"IBM", "3526"}, {"SGI", "TP9400"}, {"SGI", "TP9500"}, {"SGI", "TP9700"}, {"SGI", "IS"}, {"STK", "OPENstorage D280"}, {"SUN", "CSM200_R"}, {"SUN", "LCSM100_I"}, {"SUN", "LCSM100_S"}, {"SUN", "LCSM100_E"}, {"SUN", "LCSM100_F"}, {"DELL", "MD3000"}, {"DELL", "MD3000i"}, {"DELL", "MD32xx"}, {"DELL", "MD32xxi"}, {"DELL", "MD36xxi"}, {"DELL", "MD36xxf"}, {"LSI", "INF-01-00"}, {"ENGENIO", "INF-01-00"}, {"STK", "FLEXLINE 380"}, {"SUN", "CSM100_R_FC"}, {"SUN", "STK6580_6780"}, {"SUN", "SUN_6180"}, {"SUN", "ArrayStorage"}, {NULL, NULL}, }; static bool rdac_match(struct scsi_device *sdev) { int i; if (scsi_device_tpgs(sdev)) return false; for (i = 0; rdac_dev_list[i].vendor; i++) { if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor, strlen(rdac_dev_list[i].vendor)) && !strncmp(sdev->model, rdac_dev_list[i].model, strlen(rdac_dev_list[i].model))) { return true; } } return false; } static int rdac_bus_attach(struct scsi_device *sdev); static void rdac_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler rdac_dh = { .name = RDAC_NAME, .module = THIS_MODULE, .devlist = rdac_dev_list, .prep_fn = rdac_prep_fn, .check_sense = rdac_check_sense, .attach = rdac_bus_attach, .detach = rdac_bus_detach, .activate = rdac_activate, .match = rdac_match, }; static int rdac_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct rdac_dh_data *h; unsigned long flags; int err; char array_name[ARRAY_LABEL_LEN]; char array_id[UNIQUE_ID_LEN]; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", RDAC_NAME); return 0; } 
scsi_dh_data->scsi_dh = &rdac_dh; h = (struct rdac_dh_data *) scsi_dh_data->buf; h->lun = UNINITIALIZED_LUN; h->state = RDAC_STATE_ACTIVE; err = get_lun_info(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; err = initialize_controller(sdev, h, array_name, array_id); if (err != SCSI_DH_OK) goto failed; err = check_ownership(sdev, h); if (err != SCSI_DH_OK) goto clean_ctlr; err = set_mode_select(sdev, h); if (err != SCSI_DH_OK) goto clean_ctlr; if (!try_module_get(THIS_MODULE)) goto clean_ctlr; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, "%s: LUN %d (%s) (%s)\n", RDAC_NAME, h->lun, mode[(int)h->mode], lun_state[(int)h->lun_state]); return 0; clean_ctlr: spin_lock(&list_lock); kref_put(&h->ctlr->kref, release_controller); spin_unlock(&list_lock); failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", RDAC_NAME); return -EINVAL; } static void rdac_bus_detach( struct scsi_device *sdev ) { struct scsi_dh_data *scsi_dh_data; struct rdac_dh_data *h; unsigned long flags; scsi_dh_data = sdev->scsi_dh_data; h = (struct rdac_dh_data *) scsi_dh_data->buf; if (h->ctlr && h->ctlr->ms_queued) flush_workqueue(kmpath_rdacd); spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); spin_lock(&list_lock); if (h->ctlr) kref_put(&h->ctlr->kref, release_controller); spin_unlock(&list_lock); kfree(scsi_dh_data); module_put(THIS_MODULE); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME); } static int __init rdac_init(void) { int r; r = scsi_register_device_handler(&rdac_dh); if (r != 0) { printk(KERN_ERR "Failed to register scsi device handler."); goto done; } /* * Create workqueue to handle mode selects for rdac */ kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd"); if (!kmpath_rdacd) { 
scsi_unregister_device_handler(&rdac_dh); printk(KERN_ERR "kmpath_rdacd creation failed.\n"); r = -EINVAL; } done: return r; } static void __exit rdac_exit(void) { destroy_workqueue(kmpath_rdacd); scsi_unregister_device_handler(&rdac_dh); } module_init(rdac_init); module_exit(rdac_exit); MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver"); MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); MODULE_VERSION("01.00.0000.0000"); MODULE_LICENSE("GPL");
gpl-2.0
garwynn/SC02E_LJF_Kernel
fs/ubifs/find.c
5413
30508
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file contains functions for finding LEBs for various purposes e.g. * garbage collection. In general, lprops category heaps and lists are used * for fast access, falling back on scanning the LPT as a last resort. */ #include <linux/sort.h> #include "ubifs.h" /** * struct scan_data - data provided to scan callback functions * @min_space: minimum number of bytes for which to scan * @pick_free: whether it is OK to scan for empty LEBs * @lnum: LEB number found is returned here * @exclude_index: whether to exclude index LEBs */ struct scan_data { int min_space; int pick_free; int lnum; int exclude_index; }; /** * valuable - determine whether LEB properties are valuable. * @c: the UBIFS file-system description object * @lprops: LEB properties * * This function return %1 if the LEB properties should be added to the LEB * properties tree in memory. Otherwise %0 is returned. 
*/ static int valuable(struct ubifs_info *c, const struct ubifs_lprops *lprops) { int n, cat = lprops->flags & LPROPS_CAT_MASK; struct ubifs_lpt_heap *heap; switch (cat) { case LPROPS_DIRTY: case LPROPS_DIRTY_IDX: case LPROPS_FREE: heap = &c->lpt_heap[cat - 1]; if (heap->cnt < heap->max_cnt) return 1; if (lprops->free + lprops->dirty >= c->dark_wm) return 1; return 0; case LPROPS_EMPTY: n = c->lst.empty_lebs + c->freeable_cnt - c->lst.taken_empty_lebs; if (n < c->lsave_cnt) return 1; return 0; case LPROPS_FREEABLE: return 1; case LPROPS_FRDI_IDX: return 1; } return 0; } /** * scan_for_dirty_cb - dirty space scan callback. * @c: the UBIFS file-system description object * @lprops: LEB properties to scan * @in_tree: whether the LEB properties are in main memory * @data: information passed to and from the caller of the scan * * This function returns a code that indicates whether the scan should continue * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree * in main memory (%LPT_SCAN_ADD), or whether the scan should stop * (%LPT_SCAN_STOP). 
*/ static int scan_for_dirty_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude LEBs with too little space */ if (lprops->free + lprops->dirty < data->min_space) return ret; /* If specified, exclude index LEBs */ if (data->exclude_index && lprops->flags & LPROPS_INDEX) return ret; /* If specified, exclude empty or freeable LEBs */ if (lprops->free + lprops->dirty == c->leb_size) { if (!data->pick_free) return ret; /* Exclude LEBs with too little dirty space (unless it is empty) */ } else if (lprops->dirty < c->dead_wm) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * scan_for_dirty - find a data LEB with free space. * @c: the UBIFS file-system description object * @min_space: minimum amount free plus dirty space the returned LEB has to * have * @pick_free: if it is OK to return a free or freeable LEB * @exclude_index: whether to exclude index LEBs * * This function returns a pointer to the LEB properties found or a negative * error code. 
 */
static const struct ubifs_lprops *scan_for_dirty(struct ubifs_info *c,
                                                 int min_space, int pick_free,
                                                 int exclude_index)
{
        const struct ubifs_lprops *lprops;
        struct ubifs_lpt_heap *heap;
        struct scan_data data;
        int err, i;

        /* There may be an LEB with enough dirty space on the free heap */
        heap = &c->lpt_heap[LPROPS_FREE - 1];
        for (i = 0; i < heap->cnt; i++) {
                lprops = heap->arr[i];
                if (lprops->free + lprops->dirty < min_space)
                        continue;
                if (lprops->dirty < c->dead_wm)
                        continue;
                return lprops;
        }
        /*
         * A LEB may have fallen off of the bottom of the dirty heap, and ended
         * up as uncategorized even though it has enough dirty space for us now,
         * so check the uncategorized list. N.B. neither empty nor freeable LEBs
         * can end up as uncategorized because they are kept on lists not
         * finite-sized heaps.
         */
        list_for_each_entry(lprops, &c->uncat_list, list) {
                if (lprops->flags & LPROPS_TAKEN)
                        continue;
                if (lprops->free + lprops->dirty < min_space)
                        continue;
                if (exclude_index && (lprops->flags & LPROPS_INDEX))
                        continue;
                if (lprops->dirty < c->dead_wm)
                        continue;
                return lprops;
        }
        /* We have looked everywhere in main memory, now scan the flash */
        if (c->pnodes_have >= c->pnode_cnt)
                /* All pnodes are in memory, so skip scan */
                return ERR_PTR(-ENOSPC);
        data.min_space = min_space;
        data.pick_free = pick_free;
        /* data.lnum == -1 means the callback never found a match */
        data.lnum = -1;
        data.exclude_index = exclude_index;
        err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
                                    (ubifs_lpt_scan_callback)scan_for_dirty_cb,
                                    &data);
        if (err)
                return ERR_PTR(err);
        ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
        c->lscan_lnum = data.lnum;
        lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
        if (IS_ERR(lprops))
                return lprops;
        ubifs_assert(lprops->lnum == data.lnum);
        ubifs_assert(lprops->free + lprops->dirty >= min_space);
        ubifs_assert(lprops->dirty >= c->dead_wm ||
                     (pick_free &&
                      lprops->free + lprops->dirty == c->leb_size));
        ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
        ubifs_assert(!exclude_index || !(lprops->flags & LPROPS_INDEX));
        return lprops;
}

/**
 * ubifs_find_dirty_leb - find a dirty LEB for the Garbage Collector.
 * @c: the UBIFS file-system description object
 * @ret_lp: LEB properties are returned here on exit
 * @min_space: minimum amount free plus dirty space the returned LEB has to
 *             have
 * @pick_free: controls whether it is OK to pick empty or index LEBs
 *
 * This function tries to find a dirty logical eraseblock which has at least
 * @min_space free and dirty space. It prefers to take an LEB from the dirty or
 * dirty index heap, and it falls-back to LPT scanning if the heaps are empty
 * or do not have an LEB which satisfies the @min_space criteria.
 *
 * Note, LEBs which have less than dead watermark of free + dirty space are
 * never picked by this function.
 *
 * The additional @pick_free argument controls if this function has to return a
 * free or freeable LEB if one is present. For example, GC must to set it to %1,
 * when called from the journal space reservation function, because the
 * appearance of free space may coincide with the loss of enough dirty space
 * for GC to succeed anyway.
 *
 * In contrast, if the Garbage Collector is called from budgeting, it should
 * just make free space, not return LEBs which are already free or freeable.
 *
 * In addition @pick_free is set to %2 by the recovery process in order to
 * recover gc_lnum in which case an index LEB must not be returned.
 *
 * This function returns zero and the LEB properties of found dirty LEB in case
 * of success, %-ENOSPC if no dirty LEB was found and a negative error code in
 * case of other failures. The returned LEB is marked as "taken".
 */
int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
                         int min_space, int pick_free)
{
        int err = 0, sum, exclude_index = pick_free == 2 ? 1 : 0;
        const struct ubifs_lprops *lp = NULL, *idx_lp = NULL;
        struct ubifs_lpt_heap *heap, *idx_heap;

        ubifs_get_lprops(c);

        if (pick_free) {
                int lebs, rsvd_idx_lebs = 0;

                spin_lock(&c->space_lock);
                lebs = c->lst.empty_lebs + c->idx_gc_cnt;
                lebs += c->freeable_cnt - c->lst.taken_empty_lebs;

                /*
                 * Note, the index may consume more LEBs than have been reserved
                 * for it. It is OK because it might be consolidated by GC.
                 * But if the index takes fewer LEBs than it is reserved for it,
                 * this function must avoid picking those reserved LEBs.
                 */
                if (c->bi.min_idx_lebs >= c->lst.idx_lebs) {
                        rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
                        exclude_index = 1;
                }
                spin_unlock(&c->space_lock);

                /* Check if there are enough free LEBs for the index */
                if (rsvd_idx_lebs < lebs) {
                        /* OK, try to find an empty LEB */
                        lp = ubifs_fast_find_empty(c);
                        if (lp)
                                goto found;

                        /* Or a freeable LEB */
                        lp = ubifs_fast_find_freeable(c);
                        if (lp)
                                goto found;
                } else
                        /*
                         * We cannot pick free/freeable LEBs in the below code.
                         */
                        pick_free = 0;
        } else {
                spin_lock(&c->space_lock);
                exclude_index = (c->bi.min_idx_lebs >= c->lst.idx_lebs);
                spin_unlock(&c->space_lock);
        }

        /* Look on the dirty and dirty index heaps */
        heap = &c->lpt_heap[LPROPS_DIRTY - 1];
        idx_heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];

        if (idx_heap->cnt && !exclude_index) {
                idx_lp = idx_heap->arr[0];
                sum = idx_lp->free + idx_lp->dirty;
                /*
                 * Since we reserve thrice as much space for the index than it
                 * actually takes, it does not make sense to pick indexing LEBs
                 * with less than, say, half LEB of dirty space. May be half is
                 * not the optimal boundary - this should be tested and
                 * checked. This boundary should determine how much we use
                 * in-the-gaps to consolidate the index comparing to how much
                 * we use garbage collector to consolidate it. The "half"
                 * criteria just feels to be fine.
                 */
                if (sum < min_space || sum < c->half_leb_size)
                        idx_lp = NULL;
        }

        if (heap->cnt) {
                /* Heap is ordered, so arr[0] is the dirtiest data LEB */
                lp = heap->arr[0];
                if (lp->dirty + lp->free < min_space)
                        lp = NULL;
        }

        /* Pick the LEB with most space */
        if (idx_lp && lp) {
                if (idx_lp->free + idx_lp->dirty >= lp->free + lp->dirty)
                        lp = idx_lp;
        } else if (idx_lp && !lp)
                lp = idx_lp;

        if (lp) {
                ubifs_assert(lp->free + lp->dirty >= c->dead_wm);
                goto found;
        }

        /* Did not find a dirty LEB on the dirty heaps, have to scan */
        dbg_find("scanning LPT for a dirty LEB");
        lp = scan_for_dirty(c, min_space, pick_free, exclude_index);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }
        ubifs_assert(lp->dirty >= c->dead_wm ||
                     (pick_free && lp->free + lp->dirty == c->leb_size));

found:
        dbg_find("found LEB %d, free %d, dirty %d, flags %#x",
                 lp->lnum, lp->free, lp->dirty, lp->flags);

        /* Mark the LEB as taken so no-one else picks it concurrently */
        lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
                             lp->flags | LPROPS_TAKEN, 0);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }

        memcpy(ret_lp, lp, sizeof(struct ubifs_lprops));

out:
        ubifs_release_lprops(c);
        return err;
}

/**
 * scan_for_free_cb - free space scan callback.
 * @c: the UBIFS file-system description object
 * @lprops: LEB properties to scan
 * @in_tree: whether the LEB properties are in main memory
 * @data: information passed to and from the caller of the scan
 *
 * This function returns a code that indicates whether the scan should continue
 * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree
 * in main memory (%LPT_SCAN_ADD), or whether the scan should stop
 * (%LPT_SCAN_STOP).
*/ static int scan_for_free_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude index LEBs */ if (lprops->flags & LPROPS_INDEX) return ret; /* Exclude LEBs with too little space */ if (lprops->free < data->min_space) return ret; /* If specified, exclude empty LEBs */ if (!data->pick_free && lprops->free == c->leb_size) return ret; /* * LEBs that have only free and dirty space must not be allocated * because they may have been unmapped already or they may have data * that is obsolete only because of nodes that are still sitting in a * wbuf. */ if (lprops->free + lprops->dirty == c->leb_size && lprops->dirty > 0) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * do_find_free_space - find a data LEB with free space. * @c: the UBIFS file-system description object * @min_space: minimum amount of free space required * @pick_free: whether it is OK to scan for empty LEBs * @squeeze: whether to try to find space in a non-empty LEB first * * This function returns a pointer to the LEB properties found or a negative * error code. 
 */
static const struct ubifs_lprops *do_find_free_space(struct ubifs_info *c,
                                                     int min_space,
                                                     int pick_free,
                                                     int squeeze)
{
        const struct ubifs_lprops *lprops;
        struct ubifs_lpt_heap *heap;
        struct scan_data data;
        int err, i;

        if (squeeze) {
                /* Prefer partially-used LEBs to avoid consuming empty ones */
                lprops = ubifs_fast_find_free(c);
                if (lprops && lprops->free >= min_space)
                        return lprops;
        }
        if (pick_free) {
                lprops = ubifs_fast_find_empty(c);
                if (lprops)
                        return lprops;
        }
        if (!squeeze) {
                lprops = ubifs_fast_find_free(c);
                if (lprops && lprops->free >= min_space)
                        return lprops;
        }
        /* There may be an LEB with enough free space on the dirty heap */
        heap = &c->lpt_heap[LPROPS_DIRTY - 1];
        for (i = 0; i < heap->cnt; i++) {
                lprops = heap->arr[i];
                if (lprops->free >= min_space)
                        return lprops;
        }
        /*
         * A LEB may have fallen off of the bottom of the free heap, and ended
         * up as uncategorized even though it has enough free space for us now,
         * so check the uncategorized list. N.B. neither empty nor freeable LEBs
         * can end up as uncategorized because they are kept on lists not
         * finite-sized heaps.
         */
        list_for_each_entry(lprops, &c->uncat_list, list) {
                if (lprops->flags & LPROPS_TAKEN)
                        continue;
                if (lprops->flags & LPROPS_INDEX)
                        continue;
                if (lprops->free >= min_space)
                        return lprops;
        }
        /* We have looked everywhere in main memory, now scan the flash */
        if (c->pnodes_have >= c->pnode_cnt)
                /* All pnodes are in memory, so skip scan */
                return ERR_PTR(-ENOSPC);
        data.min_space = min_space;
        data.pick_free = pick_free;
        /* data.lnum == -1 means the callback never found a match */
        data.lnum = -1;
        err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
                                    (ubifs_lpt_scan_callback)scan_for_free_cb,
                                    &data);
        if (err)
                return ERR_PTR(err);
        ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
        c->lscan_lnum = data.lnum;
        lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
        if (IS_ERR(lprops))
                return lprops;
        ubifs_assert(lprops->lnum == data.lnum);
        ubifs_assert(lprops->free >= min_space);
        ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
        ubifs_assert(!(lprops->flags & LPROPS_INDEX));
        return lprops;
}

/**
 * ubifs_find_free_space - find a data LEB with free space.
 * @c: the UBIFS file-system description object
 * @min_space: minimum amount of required free space
 * @offs: contains offset of where free space starts on exit
 * @squeeze: whether to try to find space in a non-empty LEB first
 *
 * This function looks for an LEB with at least @min_space bytes of free space.
 * It tries to find an empty LEB if possible. If no empty LEBs are available,
 * this function searches for a non-empty data LEB. The returned LEB is marked
 * as "taken".
 *
 * This function returns found LEB number in case of success, %-ENOSPC if it
 * failed to find a LEB with @min_space bytes of free space and other a negative
 * error codes in case of failure.
 */
int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *offs,
                          int squeeze)
{
        const struct ubifs_lprops *lprops;
        int lebs, rsvd_idx_lebs, pick_free = 0, err, lnum, flags;

        dbg_find("min_space %d", min_space);
        ubifs_get_lprops(c);

        /* Check if there are enough empty LEBs for commit */
        spin_lock(&c->space_lock);
        if (c->bi.min_idx_lebs > c->lst.idx_lebs)
                rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
        else
                rsvd_idx_lebs = 0;
        lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
               c->lst.taken_empty_lebs;
        if (rsvd_idx_lebs < lebs)
                /*
                 * OK to allocate an empty LEB, but we still don't want to go
                 * looking for one if there aren't any.
                 */
                if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
                        pick_free = 1;
                        /*
                         * Because we release the space lock, we must account
                         * for this allocation here. After the LEB properties
                         * flags have been updated, we subtract one. Note, the
                         * result of this is that lprops also decreases
                         * @taken_empty_lebs in 'ubifs_change_lp()', so it is
                         * off by one for a short period of time which may
                         * introduce a small disturbance to budgeting
                         * calculations, but this is harmless because at the
                         * worst case this would make the budgeting subsystem
                         * be more pessimistic than needed.
                         *
                         * Fundamentally, this is about serialization of the
                         * budgeting and lprops subsystems. We could make the
                         * @space_lock a mutex and avoid dropping it before
                         * calling 'ubifs_change_lp()', but mutex is more
                         * heavy-weight, and we want budgeting to be as fast as
                         * possible.
                         */
                        c->lst.taken_empty_lebs += 1;
                }
        spin_unlock(&c->space_lock);

        lprops = do_find_free_space(c, min_space, pick_free, squeeze);
        if (IS_ERR(lprops)) {
                err = PTR_ERR(lprops);
                goto out;
        }

        lnum = lprops->lnum;
        flags = lprops->flags | LPROPS_TAKEN;

        /* Mark the LEB as taken so no-one else picks it concurrently */
        lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, flags, 0);
        if (IS_ERR(lprops)) {
                err = PTR_ERR(lprops);
                goto out;
        }

        if (pick_free) {
                /* Undo the provisional accounting done above */
                spin_lock(&c->space_lock);
                c->lst.taken_empty_lebs -= 1;
                spin_unlock(&c->space_lock);
        }

        *offs = c->leb_size - lprops->free;
        ubifs_release_lprops(c);

        if (*offs == 0) {
                /*
                 * Ensure that empty LEBs have been unmapped. They may not have
                 * been, for example, because of an unclean unmount. Also
                 * LEBs that were freeable LEBs (free + dirty == leb_size) will
                 * not have been unmapped.
                 */
                err = ubifs_leb_unmap(c, lnum);
                if (err)
                        return err;
        }

        dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs);
        ubifs_assert(*offs <= c->leb_size - min_space);
        return lnum;

out:
        if (pick_free) {
                /* Undo the provisional accounting done above */
                spin_lock(&c->space_lock);
                c->lst.taken_empty_lebs -= 1;
                spin_unlock(&c->space_lock);
        }
        ubifs_release_lprops(c);
        return err;
}

/**
 * scan_for_idx_cb - callback used by the scan for a free LEB for the index.
 * @c: the UBIFS file-system description object
 * @lprops: LEB properties to scan
 * @in_tree: whether the LEB properties are in main memory
 * @data: information passed to and from the caller of the scan
 *
 * This function returns a code that indicates whether the scan should continue
 * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree
 * in main memory (%LPT_SCAN_ADD), or whether the scan should stop
 * (%LPT_SCAN_STOP).
 */
static int scan_for_idx_cb(struct ubifs_info *c,
                           const struct ubifs_lprops *lprops, int in_tree,
                           struct scan_data *data)
{
        int ret = LPT_SCAN_CONTINUE;

        /* Exclude LEBs that are currently in use */
        if (lprops->flags & LPROPS_TAKEN)
                return LPT_SCAN_CONTINUE;
        /* Determine whether to add these LEB properties to the tree */
        if (!in_tree && valuable(c, lprops))
                ret |= LPT_SCAN_ADD;
        /* Exclude index LEBS */
        if (lprops->flags & LPROPS_INDEX)
                return ret;
        /* Exclude LEBs that cannot be made empty */
        if (lprops->free + lprops->dirty != c->leb_size)
                return ret;
        /*
         * We are allocating for the index so it is safe to allocate LEBs with
         * only free and dirty space, because write buffers are sync'd at commit
         * start.
         */
        data->lnum = lprops->lnum;
        return LPT_SCAN_ADD | LPT_SCAN_STOP;
}

/**
 * scan_for_leb_for_idx - scan for a free LEB for the index.
 * @c: the UBIFS file-system description object
 */
static const struct ubifs_lprops *scan_for_leb_for_idx(struct ubifs_info *c)
{
        struct ubifs_lprops *lprops;
        struct scan_data data;
        int err;

        /* data.lnum == -1 means the callback never found a match */
        data.lnum = -1;
        err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum,
                                    (ubifs_lpt_scan_callback)scan_for_idx_cb,
                                    &data);
        if (err)
                return ERR_PTR(err);
        ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt);
        c->lscan_lnum = data.lnum;
        lprops = ubifs_lpt_lookup_dirty(c, data.lnum);
        if (IS_ERR(lprops))
                return lprops;
        ubifs_assert(lprops->lnum == data.lnum);
        ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
        ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
        ubifs_assert(!(lprops->flags & LPROPS_INDEX));
        return lprops;
}

/**
 * ubifs_find_free_leb_for_idx - find a free LEB for the index.
 * @c: the UBIFS file-system description object
 *
 * This function looks for a free LEB and returns that LEB number. The returned
 * LEB is marked as "taken", "index".
 *
 * Only empty LEBs are allocated. This is for two reasons. First, the commit
 * calculates the number of LEBs to allocate based on the assumption that they
 * will be empty. Secondly, free space at the end of an index LEB is not
 * guaranteed to be empty because it may have been used by the in-the-gaps
 * method prior to an unclean unmount.
 *
 * If no LEB is found %-ENOSPC is returned. For other failures another negative
 * error code is returned.
 */
int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
{
        const struct ubifs_lprops *lprops;
        int lnum = -1, err, flags;

        ubifs_get_lprops(c);

        lprops = ubifs_fast_find_empty(c);
        if (!lprops) {
                lprops = ubifs_fast_find_freeable(c);
                if (!lprops) {
                        ubifs_assert(c->freeable_cnt == 0);
                        if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
                                lprops = scan_for_leb_for_idx(c);
                                if (IS_ERR(lprops)) {
                                        err = PTR_ERR(lprops);
                                        goto out;
                                }
                        }
                }
        }

        if (!lprops) {
                err = -ENOSPC;
                goto out;
        }

        lnum = lprops->lnum;

        dbg_find("found LEB %d, free %d, dirty %d, flags %#x",
                 lnum, lprops->free, lprops->dirty, lprops->flags);

        flags = lprops->flags | LPROPS_TAKEN | LPROPS_INDEX;
        lprops = ubifs_change_lp(c, lprops, c->leb_size, 0, flags, 0);
        if (IS_ERR(lprops)) {
                err = PTR_ERR(lprops);
                goto out;
        }

        ubifs_release_lprops(c);

        /*
         * Ensure that empty LEBs have been unmapped. They may not have been,
         * for example, because of an unclean unmount. Also LEBs that were
         * freeable LEBs (free + dirty == leb_size) will not have been unmapped.
         */
        err = ubifs_leb_unmap(c, lnum);
        if (err) {
                /* Roll back the "taken"/"index" marking on failure */
                ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
                                    LPROPS_TAKEN | LPROPS_INDEX, 0);
                return err;
        }

        return lnum;

out:
        ubifs_release_lprops(c);
        return err;
}

/* Comparison callback for sort(): orders lprops by free + dirty, ascending */
static int cmp_dirty_idx(const struct ubifs_lprops **a,
                         const struct ubifs_lprops **b)
{
        const struct ubifs_lprops *lpa = *a;
        const struct ubifs_lprops *lpb = *b;

        return lpa->dirty + lpa->free - lpb->dirty - lpb->free;
}

/* Swap callback for sort(): exchanges two lprops pointers */
static void swap_dirty_idx(struct ubifs_lprops **a, struct ubifs_lprops **b,
                           int size)
{
        struct ubifs_lprops *t = *a;

        *a = *b;
        *b = t;
}

/**
 * ubifs_save_dirty_idx_lnums - save an array of the most dirty index LEB nos.
 * @c: the UBIFS file-system description object
 *
 * This function is called each commit to create an array of LEB numbers of
 * dirty index LEBs sorted in order of dirty and free space. This is used by
 * the in-the-gaps method of TNC commit.
 */
int ubifs_save_dirty_idx_lnums(struct ubifs_info *c)
{
        int i;

        ubifs_get_lprops(c);
        /* Copy the LPROPS_DIRTY_IDX heap */
        c->dirty_idx.cnt = c->lpt_heap[LPROPS_DIRTY_IDX - 1].cnt;
        memcpy(c->dirty_idx.arr, c->lpt_heap[LPROPS_DIRTY_IDX - 1].arr,
               sizeof(void *) * c->dirty_idx.cnt);
        /* Sort it so that the dirtiest is now at the end */
        sort(c->dirty_idx.arr, c->dirty_idx.cnt, sizeof(void *),
             (int (*)(const void *, const void *))cmp_dirty_idx,
             (void (*)(void *, void *, int))swap_dirty_idx);
        dbg_find("found %d dirty index LEBs", c->dirty_idx.cnt);
        if (c->dirty_idx.cnt)
                dbg_find("dirtiest index LEB is %d with dirty %d and free %d",
                         c->dirty_idx.arr[c->dirty_idx.cnt - 1]->lnum,
                         c->dirty_idx.arr[c->dirty_idx.cnt - 1]->dirty,
                         c->dirty_idx.arr[c->dirty_idx.cnt - 1]->free);
        /* Replace the lprops pointers with LEB numbers */
        for (i = 0; i < c->dirty_idx.cnt; i++)
                c->dirty_idx.arr[i] = (void *)(size_t)c->dirty_idx.arr[i]->lnum;
        ubifs_release_lprops(c);
        return 0;
}

/**
 * scan_dirty_idx_cb - callback used by the scan for a dirty index LEB.
 * @c: the UBIFS file-system description object
 * @lprops: LEB properties to scan
 * @in_tree: whether the LEB properties are in main memory
 * @data: information passed to and from the caller of the scan
 *
 * This function returns a code that indicates whether the scan should continue
 * (%LPT_SCAN_CONTINUE), whether the LEB properties should be added to the tree
 * in main memory (%LPT_SCAN_ADD), or whether the scan should stop
 * (%LPT_SCAN_STOP).
*/ static int scan_dirty_idx_cb(struct ubifs_info *c, const struct ubifs_lprops *lprops, int in_tree, struct scan_data *data) { int ret = LPT_SCAN_CONTINUE; /* Exclude LEBs that are currently in use */ if (lprops->flags & LPROPS_TAKEN) return LPT_SCAN_CONTINUE; /* Determine whether to add these LEB properties to the tree */ if (!in_tree && valuable(c, lprops)) ret |= LPT_SCAN_ADD; /* Exclude non-index LEBs */ if (!(lprops->flags & LPROPS_INDEX)) return ret; /* Exclude LEBs with too little space */ if (lprops->free + lprops->dirty < c->min_idx_node_sz) return ret; /* Finally we found space */ data->lnum = lprops->lnum; return LPT_SCAN_ADD | LPT_SCAN_STOP; } /** * find_dirty_idx_leb - find a dirty index LEB. * @c: the UBIFS file-system description object * * This function returns LEB number upon success and a negative error code upon * failure. In particular, -ENOSPC is returned if a dirty index LEB is not * found. * * Note that this function scans the entire LPT but it is called very rarely. 
*/ static int find_dirty_idx_leb(struct ubifs_info *c) { const struct ubifs_lprops *lprops; struct ubifs_lpt_heap *heap; struct scan_data data; int err, i, ret; /* Check all structures in memory first */ data.lnum = -1; heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1]; for (i = 0; i < heap->cnt; i++) { lprops = heap->arr[i]; ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } list_for_each_entry(lprops, &c->frdi_idx_list, list) { ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } list_for_each_entry(lprops, &c->uncat_list, list) { ret = scan_dirty_idx_cb(c, lprops, 1, &data); if (ret & LPT_SCAN_STOP) goto found; } if (c->pnodes_have >= c->pnode_cnt) /* All pnodes are in memory, so skip scan */ return -ENOSPC; err = ubifs_lpt_scan_nolock(c, -1, c->lscan_lnum, (ubifs_lpt_scan_callback)scan_dirty_idx_cb, &data); if (err) return err; found: ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); c->lscan_lnum = data.lnum; lprops = ubifs_lpt_lookup_dirty(c, data.lnum); if (IS_ERR(lprops)) return PTR_ERR(lprops); ubifs_assert(lprops->lnum == data.lnum); ubifs_assert(lprops->free + lprops->dirty >= c->min_idx_node_sz); ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); ubifs_assert((lprops->flags & LPROPS_INDEX)); dbg_find("found dirty LEB %d, free %d, dirty %d, flags %#x", lprops->lnum, lprops->free, lprops->dirty, lprops->flags); lprops = ubifs_change_lp(c, lprops, LPROPS_NC, LPROPS_NC, lprops->flags | LPROPS_TAKEN, 0); if (IS_ERR(lprops)) return PTR_ERR(lprops); return lprops->lnum; } /** * get_idx_gc_leb - try to get a LEB number from trivial GC. * @c: the UBIFS file-system description object */ static int get_idx_gc_leb(struct ubifs_info *c) { const struct ubifs_lprops *lp; int err, lnum; err = ubifs_get_idx_gc_leb(c); if (err < 0) return err; lnum = err; /* * The LEB was due to be unmapped after the commit but * it is needed now for this commit. 
*/ lp = ubifs_lpt_lookup_dirty(c, lnum); if (IS_ERR(lp)) return PTR_ERR(lp); lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_INDEX, -1); if (IS_ERR(lp)) return PTR_ERR(lp); dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, lp->free, lp->flags); return lnum; } /** * find_dirtiest_idx_leb - find dirtiest index LEB from dirtiest array. * @c: the UBIFS file-system description object */ static int find_dirtiest_idx_leb(struct ubifs_info *c) { const struct ubifs_lprops *lp; int lnum; while (1) { if (!c->dirty_idx.cnt) return -ENOSPC; /* The lprops pointers were replaced by LEB numbers */ lnum = (size_t)c->dirty_idx.arr[--c->dirty_idx.cnt]; lp = ubifs_lpt_lookup(c, lnum); if (IS_ERR(lp)) return PTR_ERR(lp); if ((lp->flags & LPROPS_TAKEN) || !(lp->flags & LPROPS_INDEX)) continue; lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC, lp->flags | LPROPS_TAKEN, 0); if (IS_ERR(lp)) return PTR_ERR(lp); break; } dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, lp->free, lp->flags); ubifs_assert(lp->flags | LPROPS_TAKEN); ubifs_assert(lp->flags | LPROPS_INDEX); return lnum; } /** * ubifs_find_dirty_idx_leb - try to find dirtiest index LEB as at last commit. * @c: the UBIFS file-system description object * * This function attempts to find an untaken index LEB with the most free and * dirty space that can be used without overwriting index nodes that were in the * last index committed. */ int ubifs_find_dirty_idx_leb(struct ubifs_info *c) { int err; ubifs_get_lprops(c); /* * We made an array of the dirtiest index LEB numbers as at the start of * last commit. Try that array first. */ err = find_dirtiest_idx_leb(c); /* Next try scanning the entire LPT */ if (err == -ENOSPC) err = find_dirty_idx_leb(c); /* Finally take any index LEBs awaiting trivial GC */ if (err == -ENOSPC) err = get_idx_gc_leb(c); ubifs_release_lprops(c); return err; }
gpl-2.0
boa19861105/BOA-A4TW
arch/powerpc/platforms/cell/celleb_scc_sio.c
9509
2600
/*
 * setup serial port in SCC
 *
 * (C) Copyright 2006-2007 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/console.h>

#include <asm/io.h>
#include <asm/prom.h>

/* sio irq0=0xb00010022 irq1=0xb00010023 irq2=0xb00010024
   mmio=0xfff000-0x1000,0xff2000-0x1000 */

/* Bitmap of SCC ports to set up, filled in from the "txx9_serial=" option */
static int txx9_serial_bitmap __initdata;

/* MMIO offset and OF resource index for each of the three SCC SIO ports */
static struct {
        uint32_t offset;
        uint32_t index;
} txx9_scc_tab[3] __initdata = {
        { 0x300, 0 },   /* 0xFFF300 */
        { 0x400, 0 },   /* 0xFFF400 */
        { 0x800, 1 }    /* 0xFF2800 */
};

/*
 * Register each SCC SIO port selected in txx9_serial_bitmap as an early
 * TXx9 serial console. Resources (IRQ, MMIO base) come from the device tree.
 */
static int __init txx9_serial_init(void)
{
        extern int early_serial_txx9_setup(struct uart_port *port);
        struct device_node *node = NULL;
        int i;
        struct uart_port req;
        struct of_irq irq;
        struct resource res;

        while ((node = of_find_compatible_node(node,
                                "serial", "toshiba,sio-scc")) != NULL) {
                for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
                        if (!(txx9_serial_bitmap & (1<<i)))
                                continue;

                        if (of_irq_map_one(node, i, &irq))
                                continue;
                        if (of_address_to_resource(node,
                                txx9_scc_tab[i].index, &res))
                                continue;

                        memset(&req, 0, sizeof(req));
                        req.line = i;
                        req.iotype = UPIO_MEM;
                        req.mapbase = res.start + txx9_scc_tab[i].offset;
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
                        req.membase = ioremap(req.mapbase, 0x24);
#endif
                        req.irq = irq_create_of_mapping(irq.controller,
                                irq.specifier, irq.size);
                        req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
                                /*HAVE_CTS_LINE*/;
                        req.uartclk = 83300000;
                        early_serial_txx9_setup(&req);
                }
        }
        return 0;
}

/*
 * Parse the "txx9_serial=" kernel option: a comma-separated list of port
 * numbers to enable. get_option() returns 2 when a comma follows (more
 * numbers to come) and 1 on the final number.
 */
static int __init txx9_serial_config(char *ptr)
{
        int i;

        for (;;) {
                switch (get_option(&ptr, &i)) {
                default:
                        return 0;
                case 2:
                        txx9_serial_bitmap |= 1 << i;
                        break;
                case 1:
                        txx9_serial_bitmap |= 1 << i;
                        return 0;
                }
        }
}
__setup("txx9_serial=", txx9_serial_config);

console_initcall(txx9_serial_init);
gpl-2.0
Jaiglissechef-i9100/f4ktion_kernel
drivers/media/common/tuners/tea5761.c
12325
8920
/* * For Philips TEA5761 FM Chip * I2C address is allways 0x20 (0x10 at 7-bit mode). * * Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org) * This code is placed under the terms of the GNUv2 General Public License * */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <media/tuner.h> #include "tuner-i2c.h" #include "tea5761.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); struct tea5761_priv { struct tuner_i2c_props i2c_props; u32 frequency; bool standby; }; /*****************************************************************************/ /*************************** * TEA5761HN I2C registers * ***************************/ /* INTREG - Read: bytes 0 and 1 / Write: byte 0 */ /* first byte for reading */ #define TEA5761_INTREG_IFFLAG 0x10 #define TEA5761_INTREG_LEVFLAG 0x8 #define TEA5761_INTREG_FRRFLAG 0x2 #define TEA5761_INTREG_BLFLAG 0x1 /* second byte for reading / byte for writing */ #define TEA5761_INTREG_IFMSK 0x10 #define TEA5761_INTREG_LEVMSK 0x8 #define TEA5761_INTREG_FRMSK 0x2 #define TEA5761_INTREG_BLMSK 0x1 /* FRQSET - Read: bytes 2 and 3 / Write: byte 1 and 2 */ /* First byte */ #define TEA5761_FRQSET_SEARCH_UP 0x80 /* 1=Station search from botton to up */ #define TEA5761_FRQSET_SEARCH_MODE 0x40 /* 1=Search mode */ /* Bits 0-5 for divider MSB */ /* Second byte */ /* Bits 0-7 for divider LSB */ /* TNCTRL - Read: bytes 4 and 5 / Write: Bytes 3 and 4 */ /* first byte */ #define TEA5761_TNCTRL_PUPD_0 0x40 /* Power UP/Power Down MSB */ #define TEA5761_TNCTRL_BLIM 0X20 /* 1= Japan Frequencies, 0= European frequencies */ #define TEA5761_TNCTRL_SWPM 0x10 /* 1= software port is FRRFLAG */ #define TEA5761_TNCTRL_IFCTC 0x08 /* 1= IF count time 15.02 ms, 0= IF count time 2.02 ms */ #define TEA5761_TNCTRL_AFM 0x04 #define TEA5761_TNCTRL_SMUTE 0x02 /* 1= Soft mute */ #define TEA5761_TNCTRL_SNC 0x01 /* second byte */ #define 
TEA5761_TNCTRL_MU 0x80 /* 1=Hard mute */ #define TEA5761_TNCTRL_SSL_1 0x40 #define TEA5761_TNCTRL_SSL_0 0x20 #define TEA5761_TNCTRL_HLSI 0x10 #define TEA5761_TNCTRL_MST 0x08 /* 1 = mono */ #define TEA5761_TNCTRL_SWP 0x04 #define TEA5761_TNCTRL_DTC 0x02 /* 1 = deemphasis 50 us, 0 = deemphasis 75 us */ #define TEA5761_TNCTRL_AHLSI 0x01 /* FRQCHECK - Read: bytes 6 and 7 */ /* First byte */ /* Bits 0-5 for divider MSB */ /* Second byte */ /* Bits 0-7 for divider LSB */ /* TUNCHECK - Read: bytes 8 and 9 */ /* First byte */ #define TEA5761_TUNCHECK_IF_MASK 0x7e /* IF count */ #define TEA5761_TUNCHECK_TUNTO 0x01 /* Second byte */ #define TEA5761_TUNCHECK_LEV_MASK 0xf0 /* Level Count */ #define TEA5761_TUNCHECK_LD 0x08 #define TEA5761_TUNCHECK_STEREO 0x04 /* TESTREG - Read: bytes 10 and 11 / Write: bytes 5 and 6 */ /* All zero = no test mode */ /* MANID - Read: bytes 12 and 13 */ /* First byte - should be 0x10 */ #define TEA5767_MANID_VERSION_MASK 0xf0 /* Version = 1 */ #define TEA5767_MANID_ID_MSB_MASK 0x0f /* Manufacurer ID - should be 0 */ /* Second byte - Should be 0x2b */ #define TEA5767_MANID_ID_LSB_MASK 0xfe /* Manufacturer ID - should be 0x15 */ #define TEA5767_MANID_IDAV 0x01 /* 1 = Chip has ID, 0 = Chip has no ID */ /* Chip ID - Read: bytes 14 and 15 */ /* First byte - should be 0x57 */ /* Second byte - should be 0x61 */ /*****************************************************************************/ #define FREQ_OFFSET 0 /* for TEA5767, it is 700 to give the right freq */ static void tea5761_status_dump(unsigned char *buffer) { unsigned int div, frq; div = ((buffer[2] & 0x3f) << 8) | buffer[3]; frq = 1000 * (div * 32768 / 1000 + FREQ_OFFSET + 225) / 4; /* Freq in KHz */ printk(KERN_INFO "tea5761: Frequency %d.%03d KHz (divider = 0x%04x)\n", frq / 1000, frq % 1000, div); } /* Freq should be specifyed at 62.5 Hz */ static int __set_radio_freq(struct dvb_frontend *fe, unsigned int freq, bool mono) { struct tea5761_priv *priv = fe->tuner_priv; unsigned int frq = 
freq; unsigned char buffer[7] = {0, 0, 0, 0, 0, 0, 0 }; unsigned div; int rc; tuner_dbg("radio freq counter %d\n", frq); if (priv->standby) { tuner_dbg("TEA5761 set to standby mode\n"); buffer[5] |= TEA5761_TNCTRL_MU; } else { buffer[4] |= TEA5761_TNCTRL_PUPD_0; } if (mono) { tuner_dbg("TEA5761 set to mono\n"); buffer[5] |= TEA5761_TNCTRL_MST; } else { tuner_dbg("TEA5761 set to stereo\n"); } div = (1000 * (frq * 4 / 16 + 700 + 225) ) >> 15; buffer[1] = (div >> 8) & 0x3f; buffer[2] = div & 0xff; if (debug) tea5761_status_dump(buffer); if (7 != (rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 7))) tuner_warn("i2c i/o error: rc == %d (should be 5)\n", rc); priv->frequency = frq * 125 / 2; return 0; } static int set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct tea5761_priv *priv = fe->analog_demod_priv; priv->standby = false; return __set_radio_freq(fe, params->frequency, params->audmode == V4L2_TUNER_MODE_MONO); } static int set_radio_sleep(struct dvb_frontend *fe) { struct tea5761_priv *priv = fe->analog_demod_priv; priv->standby = true; return __set_radio_freq(fe, priv->frequency, false); } static int tea5761_read_status(struct dvb_frontend *fe, char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int rc; memset(buffer, 0, 16); if (16 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props, buffer, 16))) { tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc); return -EREMOTEIO; } return 0; } static inline int tea5761_signal(struct dvb_frontend *fe, const char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int signal = ((buffer[9] & TEA5761_TUNCHECK_LEV_MASK) << (13 - 4)); tuner_dbg("Signal strength: %d\n", signal); return signal; } static inline int tea5761_stereo(struct dvb_frontend *fe, const char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int stereo = buffer[9] & TEA5761_TUNCHECK_STEREO; tuner_dbg("Radio ST GET = %02x\n", stereo); return (stereo ? 
V4L2_TUNER_SUB_STEREO : 0); } static int tea5761_get_status(struct dvb_frontend *fe, u32 *status) { unsigned char buffer[16]; *status = 0; if (0 == tea5761_read_status(fe, buffer)) { if (tea5761_signal(fe, buffer)) *status = TUNER_STATUS_LOCKED; if (tea5761_stereo(fe, buffer)) *status |= TUNER_STATUS_STEREO; } return 0; } static int tea5761_get_rf_strength(struct dvb_frontend *fe, u16 *strength) { unsigned char buffer[16]; *strength = 0; if (0 == tea5761_read_status(fe, buffer)) *strength = tea5761_signal(fe, buffer); return 0; } int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr) { unsigned char buffer[16]; int rc; struct tuner_i2c_props i2c = { .adap = i2c_adap, .addr = i2c_addr }; if (16 != (rc = tuner_i2c_xfer_recv(&i2c, buffer, 16))) { printk(KERN_WARNING "it is not a TEA5761. Received %i chars.\n", rc); return -EINVAL; } if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x061)) { printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x." " It is not a TEA5761\n", buffer[13], buffer[14], buffer[15]); return -EINVAL; } printk(KERN_WARNING "tea5761: TEA%02x%02x detected. 
" "Manufacturer ID= 0x%02x\n", buffer[14], buffer[15], buffer[13]); return 0; } static int tea5761_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tea5761_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static struct dvb_tuner_ops tea5761_tuner_ops = { .info = { .name = "tea5761", // Philips TEA5761HN FM Radio }, .set_analog_params = set_radio_freq, .sleep = set_radio_sleep, .release = tea5761_release, .get_frequency = tea5761_get_frequency, .get_status = tea5761_get_status, .get_rf_strength = tea5761_get_rf_strength, }; struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe, struct i2c_adapter* i2c_adap, u8 i2c_addr) { struct tea5761_priv *priv = NULL; if (tea5761_autodetection(i2c_adap, i2c_addr) != 0) return NULL; priv = kzalloc(sizeof(struct tea5761_priv), GFP_KERNEL); if (priv == NULL) return NULL; fe->tuner_priv = priv; priv->i2c_props.addr = i2c_addr; priv->i2c_props.adap = i2c_adap; priv->i2c_props.name = "tea5761"; memcpy(&fe->ops.tuner_ops, &tea5761_tuner_ops, sizeof(struct dvb_tuner_ops)); tuner_info("type set to %s\n", "Philips TEA5761HN FM Radio"); return fe; } EXPORT_SYMBOL_GPL(tea5761_attach); EXPORT_SYMBOL_GPL(tea5761_autodetection); MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); MODULE_LICENSE("GPL");
gpl-2.0
tchaari/android_kernel_samsung_crespo
drivers/media/common/tuners/tea5761.c
12325
8920
/* * For Philips TEA5761 FM Chip * I2C address is allways 0x20 (0x10 at 7-bit mode). * * Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org) * This code is placed under the terms of the GNUv2 General Public License * */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <media/tuner.h> #include "tuner-i2c.h" #include "tea5761.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); struct tea5761_priv { struct tuner_i2c_props i2c_props; u32 frequency; bool standby; }; /*****************************************************************************/ /*************************** * TEA5761HN I2C registers * ***************************/ /* INTREG - Read: bytes 0 and 1 / Write: byte 0 */ /* first byte for reading */ #define TEA5761_INTREG_IFFLAG 0x10 #define TEA5761_INTREG_LEVFLAG 0x8 #define TEA5761_INTREG_FRRFLAG 0x2 #define TEA5761_INTREG_BLFLAG 0x1 /* second byte for reading / byte for writing */ #define TEA5761_INTREG_IFMSK 0x10 #define TEA5761_INTREG_LEVMSK 0x8 #define TEA5761_INTREG_FRMSK 0x2 #define TEA5761_INTREG_BLMSK 0x1 /* FRQSET - Read: bytes 2 and 3 / Write: byte 1 and 2 */ /* First byte */ #define TEA5761_FRQSET_SEARCH_UP 0x80 /* 1=Station search from botton to up */ #define TEA5761_FRQSET_SEARCH_MODE 0x40 /* 1=Search mode */ /* Bits 0-5 for divider MSB */ /* Second byte */ /* Bits 0-7 for divider LSB */ /* TNCTRL - Read: bytes 4 and 5 / Write: Bytes 3 and 4 */ /* first byte */ #define TEA5761_TNCTRL_PUPD_0 0x40 /* Power UP/Power Down MSB */ #define TEA5761_TNCTRL_BLIM 0X20 /* 1= Japan Frequencies, 0= European frequencies */ #define TEA5761_TNCTRL_SWPM 0x10 /* 1= software port is FRRFLAG */ #define TEA5761_TNCTRL_IFCTC 0x08 /* 1= IF count time 15.02 ms, 0= IF count time 2.02 ms */ #define TEA5761_TNCTRL_AFM 0x04 #define TEA5761_TNCTRL_SMUTE 0x02 /* 1= Soft mute */ #define TEA5761_TNCTRL_SNC 0x01 /* second byte */ #define 
TEA5761_TNCTRL_MU 0x80 /* 1=Hard mute */ #define TEA5761_TNCTRL_SSL_1 0x40 #define TEA5761_TNCTRL_SSL_0 0x20 #define TEA5761_TNCTRL_HLSI 0x10 #define TEA5761_TNCTRL_MST 0x08 /* 1 = mono */ #define TEA5761_TNCTRL_SWP 0x04 #define TEA5761_TNCTRL_DTC 0x02 /* 1 = deemphasis 50 us, 0 = deemphasis 75 us */ #define TEA5761_TNCTRL_AHLSI 0x01 /* FRQCHECK - Read: bytes 6 and 7 */ /* First byte */ /* Bits 0-5 for divider MSB */ /* Second byte */ /* Bits 0-7 for divider LSB */ /* TUNCHECK - Read: bytes 8 and 9 */ /* First byte */ #define TEA5761_TUNCHECK_IF_MASK 0x7e /* IF count */ #define TEA5761_TUNCHECK_TUNTO 0x01 /* Second byte */ #define TEA5761_TUNCHECK_LEV_MASK 0xf0 /* Level Count */ #define TEA5761_TUNCHECK_LD 0x08 #define TEA5761_TUNCHECK_STEREO 0x04 /* TESTREG - Read: bytes 10 and 11 / Write: bytes 5 and 6 */ /* All zero = no test mode */ /* MANID - Read: bytes 12 and 13 */ /* First byte - should be 0x10 */ #define TEA5767_MANID_VERSION_MASK 0xf0 /* Version = 1 */ #define TEA5767_MANID_ID_MSB_MASK 0x0f /* Manufacurer ID - should be 0 */ /* Second byte - Should be 0x2b */ #define TEA5767_MANID_ID_LSB_MASK 0xfe /* Manufacturer ID - should be 0x15 */ #define TEA5767_MANID_IDAV 0x01 /* 1 = Chip has ID, 0 = Chip has no ID */ /* Chip ID - Read: bytes 14 and 15 */ /* First byte - should be 0x57 */ /* Second byte - should be 0x61 */ /*****************************************************************************/ #define FREQ_OFFSET 0 /* for TEA5767, it is 700 to give the right freq */ static void tea5761_status_dump(unsigned char *buffer) { unsigned int div, frq; div = ((buffer[2] & 0x3f) << 8) | buffer[3]; frq = 1000 * (div * 32768 / 1000 + FREQ_OFFSET + 225) / 4; /* Freq in KHz */ printk(KERN_INFO "tea5761: Frequency %d.%03d KHz (divider = 0x%04x)\n", frq / 1000, frq % 1000, div); } /* Freq should be specifyed at 62.5 Hz */ static int __set_radio_freq(struct dvb_frontend *fe, unsigned int freq, bool mono) { struct tea5761_priv *priv = fe->tuner_priv; unsigned int frq = 
freq; unsigned char buffer[7] = {0, 0, 0, 0, 0, 0, 0 }; unsigned div; int rc; tuner_dbg("radio freq counter %d\n", frq); if (priv->standby) { tuner_dbg("TEA5761 set to standby mode\n"); buffer[5] |= TEA5761_TNCTRL_MU; } else { buffer[4] |= TEA5761_TNCTRL_PUPD_0; } if (mono) { tuner_dbg("TEA5761 set to mono\n"); buffer[5] |= TEA5761_TNCTRL_MST; } else { tuner_dbg("TEA5761 set to stereo\n"); } div = (1000 * (frq * 4 / 16 + 700 + 225) ) >> 15; buffer[1] = (div >> 8) & 0x3f; buffer[2] = div & 0xff; if (debug) tea5761_status_dump(buffer); if (7 != (rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 7))) tuner_warn("i2c i/o error: rc == %d (should be 5)\n", rc); priv->frequency = frq * 125 / 2; return 0; } static int set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct tea5761_priv *priv = fe->analog_demod_priv; priv->standby = false; return __set_radio_freq(fe, params->frequency, params->audmode == V4L2_TUNER_MODE_MONO); } static int set_radio_sleep(struct dvb_frontend *fe) { struct tea5761_priv *priv = fe->analog_demod_priv; priv->standby = true; return __set_radio_freq(fe, priv->frequency, false); } static int tea5761_read_status(struct dvb_frontend *fe, char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int rc; memset(buffer, 0, 16); if (16 != (rc = tuner_i2c_xfer_recv(&priv->i2c_props, buffer, 16))) { tuner_warn("i2c i/o error: rc == %d (should be 16)\n", rc); return -EREMOTEIO; } return 0; } static inline int tea5761_signal(struct dvb_frontend *fe, const char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int signal = ((buffer[9] & TEA5761_TUNCHECK_LEV_MASK) << (13 - 4)); tuner_dbg("Signal strength: %d\n", signal); return signal; } static inline int tea5761_stereo(struct dvb_frontend *fe, const char *buffer) { struct tea5761_priv *priv = fe->tuner_priv; int stereo = buffer[9] & TEA5761_TUNCHECK_STEREO; tuner_dbg("Radio ST GET = %02x\n", stereo); return (stereo ? 
V4L2_TUNER_SUB_STEREO : 0); } static int tea5761_get_status(struct dvb_frontend *fe, u32 *status) { unsigned char buffer[16]; *status = 0; if (0 == tea5761_read_status(fe, buffer)) { if (tea5761_signal(fe, buffer)) *status = TUNER_STATUS_LOCKED; if (tea5761_stereo(fe, buffer)) *status |= TUNER_STATUS_STEREO; } return 0; } static int tea5761_get_rf_strength(struct dvb_frontend *fe, u16 *strength) { unsigned char buffer[16]; *strength = 0; if (0 == tea5761_read_status(fe, buffer)) *strength = tea5761_signal(fe, buffer); return 0; } int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr) { unsigned char buffer[16]; int rc; struct tuner_i2c_props i2c = { .adap = i2c_adap, .addr = i2c_addr }; if (16 != (rc = tuner_i2c_xfer_recv(&i2c, buffer, 16))) { printk(KERN_WARNING "it is not a TEA5761. Received %i chars.\n", rc); return -EINVAL; } if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x061)) { printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x." " It is not a TEA5761\n", buffer[13], buffer[14], buffer[15]); return -EINVAL; } printk(KERN_WARNING "tea5761: TEA%02x%02x detected. 
" "Manufacturer ID= 0x%02x\n", buffer[14], buffer[15], buffer[13]); return 0; } static int tea5761_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tea5761_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static struct dvb_tuner_ops tea5761_tuner_ops = { .info = { .name = "tea5761", // Philips TEA5761HN FM Radio }, .set_analog_params = set_radio_freq, .sleep = set_radio_sleep, .release = tea5761_release, .get_frequency = tea5761_get_frequency, .get_status = tea5761_get_status, .get_rf_strength = tea5761_get_rf_strength, }; struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe, struct i2c_adapter* i2c_adap, u8 i2c_addr) { struct tea5761_priv *priv = NULL; if (tea5761_autodetection(i2c_adap, i2c_addr) != 0) return NULL; priv = kzalloc(sizeof(struct tea5761_priv), GFP_KERNEL); if (priv == NULL) return NULL; fe->tuner_priv = priv; priv->i2c_props.addr = i2c_addr; priv->i2c_props.adap = i2c_adap; priv->i2c_props.name = "tea5761"; memcpy(&fe->ops.tuner_ops, &tea5761_tuner_ops, sizeof(struct dvb_tuner_ops)); tuner_info("type set to %s\n", "Philips TEA5761HN FM Radio"); return fe; } EXPORT_SYMBOL_GPL(tea5761_attach); EXPORT_SYMBOL_GPL(tea5761_autodetection); MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); MODULE_LICENSE("GPL");
gpl-2.0
umiddelb/linux-fslc
drivers/spi/spi-tegra20-sflash.c
38
17241
/* * SPI driver for Nvidia's Tegra20 Serial Flash Controller. * * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/reset.h> #include <linux/spi/spi.h> #define SPI_COMMAND 0x000 #define SPI_GO BIT(30) #define SPI_M_S BIT(28) #define SPI_ACTIVE_SCLK_MASK (0x3 << 26) #define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26) #define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26) #define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26) #define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26) #define SPI_CK_SDA_FALLING (1 << 21) #define SPI_CK_SDA_RISING (0 << 21) #define SPI_CK_SDA_MASK (1 << 21) #define SPI_ACTIVE_SDA (0x3 << 18) #define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18) #define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18) #define SPI_ACTIVE_SDA_PULL_LOW (2 << 18) #define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18) #define SPI_CS_POL_INVERT BIT(16) #define SPI_TX_EN BIT(15) #define SPI_RX_EN BIT(14) #define SPI_CS_VAL_HIGH BIT(13) #define SPI_CS_VAL_LOW 0x0 #define SPI_CS_SW BIT(12) #define SPI_CS_HW 0x0 #define SPI_CS_DELAY_MASK (7 << 9) 
#define SPI_CS3_EN BIT(8) #define SPI_CS2_EN BIT(7) #define SPI_CS1_EN BIT(6) #define SPI_CS0_EN BIT(5) #define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \ SPI_CS1_EN | SPI_CS0_EN) #define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) #define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK) #define SPI_STATUS 0x004 #define SPI_BSY BIT(31) #define SPI_RDY BIT(30) #define SPI_TXF_FLUSH BIT(29) #define SPI_RXF_FLUSH BIT(28) #define SPI_RX_UNF BIT(27) #define SPI_TX_OVF BIT(26) #define SPI_RXF_EMPTY BIT(25) #define SPI_RXF_FULL BIT(24) #define SPI_TXF_EMPTY BIT(23) #define SPI_TXF_FULL BIT(22) #define SPI_BLK_CNT(count) (((count) & 0xffff) + 1) #define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF) #define SPI_FIFO_EMPTY (SPI_TX_EMPTY | SPI_RX_EMPTY) #define SPI_RX_CMP 0x8 #define SPI_DMA_CTL 0x0C #define SPI_DMA_EN BIT(31) #define SPI_IE_RXC BIT(27) #define SPI_IE_TXC BIT(26) #define SPI_PACKED BIT(20) #define SPI_RX_TRIG_MASK (0x3 << 18) #define SPI_RX_TRIG_1W (0x0 << 18) #define SPI_RX_TRIG_4W (0x1 << 18) #define SPI_TX_TRIG_MASK (0x3 << 16) #define SPI_TX_TRIG_1W (0x0 << 16) #define SPI_TX_TRIG_4W (0x1 << 16) #define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF); #define SPI_TX_FIFO 0x10 #define SPI_RX_FIFO 0x20 #define DATA_DIR_TX (1 << 0) #define DATA_DIR_RX (1 << 1) #define MAX_CHIP_SELECT 4 #define SPI_FIFO_DEPTH 4 #define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) struct tegra_sflash_data { struct device *dev; struct spi_master *master; spinlock_t lock; struct clk *clk; struct reset_control *rst; void __iomem *base; unsigned irq; u32 spi_max_frequency; u32 cur_speed; struct spi_device *cur_spi; unsigned cur_pos; unsigned cur_len; unsigned bytes_per_word; unsigned cur_direction; unsigned curr_xfer_words; unsigned cur_rx_pos; unsigned cur_tx_pos; u32 tx_status; u32 rx_status; u32 status_reg; u32 def_command_reg; u32 command_reg; u32 dma_control_reg; struct completion xfer_completion; struct spi_transfer *curr_xfer; }; static int tegra_sflash_runtime_suspend(struct device 
*dev); static int tegra_sflash_runtime_resume(struct device *dev); static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd, unsigned long reg) { return readl(tsd->base + reg); } static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd, u32 val, unsigned long reg) { writel(val, tsd->base + reg); } static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd) { /* Write 1 to clear status register */ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS); } static unsigned tegra_sflash_calculate_curr_xfer_param( struct spi_device *spi, struct tegra_sflash_data *tsd, struct spi_transfer *t) { unsigned remain_len = t->len - tsd->cur_pos; unsigned max_word; tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8); max_word = remain_len / tsd->bytes_per_word; if (max_word > SPI_FIFO_DEPTH) max_word = SPI_FIFO_DEPTH; tsd->curr_xfer_words = max_word; return max_word; } static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf( struct tegra_sflash_data *tsd, struct spi_transfer *t) { unsigned nbytes; u32 status; unsigned max_n_32bit = tsd->curr_xfer_words; u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos; if (max_n_32bit > SPI_FIFO_DEPTH) max_n_32bit = SPI_FIFO_DEPTH; nbytes = max_n_32bit * tsd->bytes_per_word; status = tegra_sflash_readl(tsd, SPI_STATUS); while (!(status & SPI_TXF_FULL)) { int i; u32 x = 0; for (i = 0; nbytes && (i < tsd->bytes_per_word); i++, nbytes--) x |= (u32)(*tx_buf++) << (i * 8); tegra_sflash_writel(tsd, x, SPI_TX_FIFO); if (!nbytes) break; status = tegra_sflash_readl(tsd, SPI_STATUS); } tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word; return max_n_32bit; } static int tegra_sflash_read_rx_fifo_to_client_rxbuf( struct tegra_sflash_data *tsd, struct spi_transfer *t) { u32 status; unsigned int read_words = 0; u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos; status = tegra_sflash_readl(tsd, SPI_STATUS); while (!(status & SPI_RXF_EMPTY)) { int i; u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO); for (i = 0; (i < 
tsd->bytes_per_word); i++) *rx_buf++ = (x >> (i*8)) & 0xFF; read_words++; status = tegra_sflash_readl(tsd, SPI_STATUS); } tsd->cur_rx_pos += read_words * tsd->bytes_per_word; return 0; } static int tegra_sflash_start_cpu_based_transfer( struct tegra_sflash_data *tsd, struct spi_transfer *t) { u32 val = 0; unsigned cur_words; if (tsd->cur_direction & DATA_DIR_TX) val |= SPI_IE_TXC; if (tsd->cur_direction & DATA_DIR_RX) val |= SPI_IE_RXC; tegra_sflash_writel(tsd, val, SPI_DMA_CTL); tsd->dma_control_reg = val; if (tsd->cur_direction & DATA_DIR_TX) cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t); else cur_words = tsd->curr_xfer_words; val |= SPI_DMA_BLK_COUNT(cur_words); tegra_sflash_writel(tsd, val, SPI_DMA_CTL); tsd->dma_control_reg = val; val |= SPI_DMA_EN; tegra_sflash_writel(tsd, val, SPI_DMA_CTL); return 0; } static int tegra_sflash_start_transfer_one(struct spi_device *spi, struct spi_transfer *t, bool is_first_of_msg, bool is_single_xfer) { struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master); u32 speed; u32 command; speed = t->speed_hz; if (speed != tsd->cur_speed) { clk_set_rate(tsd->clk, speed); tsd->cur_speed = speed; } tsd->cur_spi = spi; tsd->cur_pos = 0; tsd->cur_rx_pos = 0; tsd->cur_tx_pos = 0; tsd->curr_xfer = t; tegra_sflash_calculate_curr_xfer_param(spi, tsd, t); if (is_first_of_msg) { command = tsd->def_command_reg; command |= SPI_BIT_LENGTH(t->bits_per_word - 1); command |= SPI_CS_VAL_HIGH; command &= ~SPI_MODES; if (spi->mode & SPI_CPHA) command |= SPI_CK_SDA_FALLING; if (spi->mode & SPI_CPOL) command |= SPI_ACTIVE_SCLK_DRIVE_HIGH; else command |= SPI_ACTIVE_SCLK_DRIVE_LOW; command |= SPI_CS0_EN << spi->chip_select; } else { command = tsd->command_reg; command &= ~SPI_BIT_LENGTH(~0); command |= SPI_BIT_LENGTH(t->bits_per_word - 1); command &= ~(SPI_RX_EN | SPI_TX_EN); } tsd->cur_direction = 0; if (t->rx_buf) { command |= SPI_RX_EN; tsd->cur_direction |= DATA_DIR_RX; } if (t->tx_buf) { command |= SPI_TX_EN; 
tsd->cur_direction |= DATA_DIR_TX; } tegra_sflash_writel(tsd, command, SPI_COMMAND); tsd->command_reg = command; return tegra_sflash_start_cpu_based_transfer(tsd, t); } static int tegra_sflash_setup(struct spi_device *spi) { struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master); /* Set speed to the spi max fequency if spi device has not set */ spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency; return 0; } static int tegra_sflash_transfer_one_message(struct spi_master *master, struct spi_message *msg) { bool is_first_msg = true; int single_xfer; struct tegra_sflash_data *tsd = spi_master_get_devdata(master); struct spi_transfer *xfer; struct spi_device *spi = msg->spi; int ret; msg->status = 0; msg->actual_length = 0; single_xfer = list_is_singular(&msg->transfers); list_for_each_entry(xfer, &msg->transfers, transfer_list) { reinit_completion(&tsd->xfer_completion); ret = tegra_sflash_start_transfer_one(spi, xfer, is_first_msg, single_xfer); if (ret < 0) { dev_err(tsd->dev, "spi can not start transfer, err %d\n", ret); goto exit; } is_first_msg = false; ret = wait_for_completion_timeout(&tsd->xfer_completion, SPI_DMA_TIMEOUT); if (WARN_ON(ret == 0)) { dev_err(tsd->dev, "spi trasfer timeout, err %d\n", ret); ret = -EIO; goto exit; } if (tsd->tx_status || tsd->rx_status) { dev_err(tsd->dev, "Error in Transfer\n"); ret = -EIO; goto exit; } msg->actual_length += xfer->len; if (xfer->cs_change && xfer->delay_usecs) { tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); udelay(xfer->delay_usecs); } } ret = 0; exit: tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); msg->status = ret; spi_finalize_current_message(master); return ret; } static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd) { struct spi_transfer *t = tsd->curr_xfer; unsigned long flags; spin_lock_irqsave(&tsd->lock, flags); if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) { dev_err(tsd->dev, "CpuXfer ERROR bit set 0x%x\n", 
tsd->status_reg); dev_err(tsd->dev, "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg, tsd->dma_control_reg); reset_control_assert(tsd->rst); udelay(2); reset_control_deassert(tsd->rst); complete(&tsd->xfer_completion); goto exit; } if (tsd->cur_direction & DATA_DIR_RX) tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t); if (tsd->cur_direction & DATA_DIR_TX) tsd->cur_pos = tsd->cur_tx_pos; else tsd->cur_pos = tsd->cur_rx_pos; if (tsd->cur_pos == t->len) { complete(&tsd->xfer_completion); goto exit; } tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t); tegra_sflash_start_cpu_based_transfer(tsd, t); exit: spin_unlock_irqrestore(&tsd->lock, flags); return IRQ_HANDLED; } static irqreturn_t tegra_sflash_isr(int irq, void *context_data) { struct tegra_sflash_data *tsd = context_data; tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS); if (tsd->cur_direction & DATA_DIR_TX) tsd->tx_status = tsd->status_reg & SPI_TX_OVF; if (tsd->cur_direction & DATA_DIR_RX) tsd->rx_status = tsd->status_reg & SPI_RX_UNF; tegra_sflash_clear_status(tsd); return handle_cpu_based_xfer(tsd); } static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd) { struct device_node *np = tsd->dev->of_node; if (of_property_read_u32(np, "spi-max-frequency", &tsd->spi_max_frequency)) tsd->spi_max_frequency = 25000000; /* 25MHz */ } static struct of_device_id tegra_sflash_of_match[] = { { .compatible = "nvidia,tegra20-sflash", }, {} }; MODULE_DEVICE_TABLE(of, tegra_sflash_of_match); static int tegra_sflash_probe(struct platform_device *pdev) { struct spi_master *master; struct tegra_sflash_data *tsd; struct resource *r; int ret; const struct of_device_id *match; match = of_match_device(tegra_sflash_of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } master = spi_alloc_master(&pdev->dev, sizeof(*tsd)); if (!master) { dev_err(&pdev->dev, "master allocation failed\n"); return -ENOMEM; } /* the spi->mode bits understood by this driver: */ 
master->mode_bits = SPI_CPOL | SPI_CPHA; master->setup = tegra_sflash_setup; master->transfer_one_message = tegra_sflash_transfer_one_message; master->auto_runtime_pm = true; master->num_chipselect = MAX_CHIP_SELECT; master->bus_num = -1; platform_set_drvdata(pdev, master); tsd = spi_master_get_devdata(master); tsd->master = master; tsd->dev = &pdev->dev; spin_lock_init(&tsd->lock); tegra_sflash_parse_dt(tsd); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); tsd->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tsd->base)) { ret = PTR_ERR(tsd->base); goto exit_free_master; } tsd->irq = platform_get_irq(pdev, 0); ret = request_irq(tsd->irq, tegra_sflash_isr, 0, dev_name(&pdev->dev), tsd); if (ret < 0) { dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", tsd->irq); goto exit_free_master; } tsd->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(tsd->clk)) { dev_err(&pdev->dev, "can not get clock\n"); ret = PTR_ERR(tsd->clk); goto exit_free_irq; } tsd->rst = devm_reset_control_get(&pdev->dev, "spi"); if (IS_ERR(tsd->rst)) { dev_err(&pdev->dev, "can not get reset\n"); ret = PTR_ERR(tsd->rst); goto exit_free_irq; } init_completion(&tsd->xfer_completion); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_sflash_runtime_resume(&pdev->dev); if (ret) goto exit_pm_disable; } ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); goto exit_pm_disable; } /* Reset controller */ reset_control_assert(tsd->rst); udelay(2); reset_control_deassert(tsd->rst); tsd->def_command_reg = SPI_M_S | SPI_CS_SW; tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); pm_runtime_put(&pdev->dev); master->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_master(&pdev->dev, master); if (ret < 0) { dev_err(&pdev->dev, "can not register to master err %d\n", ret); goto exit_pm_disable; } return ret; exit_pm_disable: pm_runtime_disable(&pdev->dev); if 
(!pm_runtime_status_suspended(&pdev->dev)) tegra_sflash_runtime_suspend(&pdev->dev); exit_free_irq: free_irq(tsd->irq, tsd); exit_free_master: spi_master_put(master); return ret; } static int tegra_sflash_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct tegra_sflash_data *tsd = spi_master_get_devdata(master); free_irq(tsd->irq, tsd); pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) tegra_sflash_runtime_suspend(&pdev->dev); return 0; } #ifdef CONFIG_PM_SLEEP static int tegra_sflash_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); return spi_master_suspend(master); } static int tegra_sflash_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_sflash_data *tsd = spi_master_get_devdata(master); int ret; ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND); pm_runtime_put(dev); return spi_master_resume(master); } #endif static int tegra_sflash_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_sflash_data *tsd = spi_master_get_devdata(master); /* Flush all write which are in PPSB queue by reading back */ tegra_sflash_readl(tsd, SPI_COMMAND); clk_disable_unprepare(tsd->clk); return 0; } static int tegra_sflash_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_sflash_data *tsd = spi_master_get_devdata(master); int ret; ret = clk_prepare_enable(tsd->clk); if (ret < 0) { dev_err(tsd->dev, "clk_prepare failed: %d\n", ret); return ret; } return 0; } static const struct dev_pm_ops slink_pm_ops = { SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend, tegra_sflash_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume) }; static struct platform_driver tegra_sflash_driver = { .driver = { 
.name = "spi-tegra-sflash", .owner = THIS_MODULE, .pm = &slink_pm_ops, .of_match_table = tegra_sflash_of_match, }, .probe = tegra_sflash_probe, .remove = tegra_sflash_remove, }; module_platform_driver(tegra_sflash_driver); MODULE_ALIAS("platform:spi-tegra-sflash"); MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver"); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
nimengyu2/ti-arm9-linux-03.21.00.04
net/ipv6/af_inet6.c
38
30802
/* * PF_INET6 socket protocol family * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Adapted from linux/net/ipv4/af_inet.c * * Fixes: * piggy, Karl Knutson : Socket protocol table * Hideaki YOSHIFUJI : sin6_scope_id support * Arnaldo Melo : check proc_net_create return, cleanups * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/icmpv6.h> #include <linux/netfilter_ipv6.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/udp.h> #include <net/udplite.h> #include <net/tcp.h> #include <net/ipip.h> #include <net/protocol.h> #include <net/inet_common.h> #include <net/route.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #ifdef CONFIG_IPV6_TUNNEL #include <net/ip6_tunnel.h> #endif #include <asm/uaccess.h> #include <asm/system.h> #include <linux/mroute6.h> MODULE_AUTHOR("Cast of dozens"); MODULE_DESCRIPTION("IPv6 protocol stack for Linux"); MODULE_LICENSE("GPL"); /* The inetsw6 table contains everything that inet6_create needs to * build a new socket. 
*/ static struct list_head inetsw6[SOCK_MAX]; static DEFINE_SPINLOCK(inetsw6_lock); struct ipv6_params ipv6_defaults = { .disable_ipv6 = 0, .autoconf = 1, }; static int disable_ipv6_mod = 0; module_param_named(disable, disable_ipv6_mod, int, 0444); MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); module_param_named(disable_ipv6, ipv6_defaults.disable_ipv6, int, 0444); MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces"); module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444); MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces"); static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) { const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo); return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } static int inet6_create(struct net *net, struct socket *sock, int protocol, int kern) { struct inet_sock *inet; struct ipv6_pinfo *np; struct sock *sk; struct inet_protosw *answer; struct proto *answer_prot; unsigned char answer_flags; char answer_no_check; int try_loading_module = 0; int err; if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM && !inet_ehash_secret) build_ehash_secret(); /* Look for the requested type/protocol pair. */ lookup_protocol: err = -ESOCKTNOSUPPORT; rcu_read_lock(); list_for_each_entry_rcu(answer, &inetsw6[sock->type], list) { err = 0; /* Check the non-wild match. */ if (protocol == answer->protocol) { if (protocol != IPPROTO_IP) break; } else { /* Check for the two wild cases. */ if (IPPROTO_IP == protocol) { protocol = answer->protocol; break; } if (IPPROTO_IP == answer->protocol) break; } err = -EPROTONOSUPPORT; } if (err) { if (try_loading_module < 2) { rcu_read_unlock(); /* * Be more specific, e.g. net-pf-10-proto-132-type-1 * (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM) */ if (++try_loading_module == 1) request_module("net-pf-%d-proto-%d-type-%d", PF_INET6, protocol, sock->type); /* * Fall back to generic, e.g. 
net-pf-10-proto-132 * (net-pf-PF_INET6-proto-IPPROTO_SCTP) */ else request_module("net-pf-%d-proto-%d", PF_INET6, protocol); goto lookup_protocol; } else goto out_rcu_unlock; } err = -EPERM; if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) goto out_rcu_unlock; sock->ops = answer->ops; answer_prot = answer->prot; answer_no_check = answer->no_check; answer_flags = answer->flags; rcu_read_unlock(); WARN_ON(answer_prot->slab == NULL); err = -ENOBUFS; sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot); if (sk == NULL) goto out; sock_init_data(sock, sk); err = 0; sk->sk_no_check = answer_no_check; if (INET_PROTOSW_REUSE & answer_flags) sk->sk_reuse = 1; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; if (SOCK_RAW == sock->type) { inet->inet_num = protocol; if (IPPROTO_RAW == protocol) inet->hdrincl = 1; } sk->sk_destruct = inet_sock_destruct; sk->sk_family = PF_INET6; sk->sk_protocol = protocol; sk->sk_backlog_rcv = answer->prot->backlog_rcv; inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); np->hop_limit = -1; np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; np->ipv6only = net->ipv6.sysctl.bindv6only; /* Init the ipv4 part of the socket since we can have sockets * using v6 API for ipv4. */ inet->uc_ttl = -1; inet->mc_loop = 1; inet->mc_ttl = 1; inet->mc_index = 0; inet->mc_list = NULL; if (ipv4_config.no_pmtu_disc) inet->pmtudisc = IP_PMTUDISC_DONT; else inet->pmtudisc = IP_PMTUDISC_WANT; /* * Increment only the relevant sk_prot->socks debug field, this changes * the previous behaviour of incrementing both the equivalent to * answer->prot->socks (inet6_sock_nr) and inet_sock_nr. * * This allows better debug granularity as we'll know exactly how many * UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6 * transport protocol socks. 
-acme */ sk_refcnt_debug_inc(sk); if (inet->inet_num) { /* It assumes that any protocol which allows * the user to assign a number at socket * creation time automatically shares. */ inet->inet_sport = htons(inet->inet_num); sk->sk_prot->hash(sk); } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); if (err) { sk_common_release(sk); goto out; } } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; } /* bind for INET6 API */ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *addr=(struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); __be32 v4addr = 0; unsigned short snum; int addr_type = 0; int err = 0; /* If the socket has its own bind function then use it. */ if (sk->sk_prot->bind) return sk->sk_prot->bind(sk, uaddr, addr_len); if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; addr_type = ipv6_addr_type(&addr->sin6_addr); if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) return -EINVAL; snum = ntohs(addr->sin6_port); if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; lock_sock(sk); /* Check these errors (active socket, double bind). */ if (sk->sk_state != TCP_CLOSE || inet->inet_num) { err = -EINVAL; goto out; } /* Check if the address belongs to the host. 
*/ if (addr_type == IPV6_ADDR_MAPPED) { int chk_addr_ret; /* Binding to v4-mapped address on a v6-only socket * makes no sense */ if (np->ipv6only) { err = -EINVAL; goto out; } /* Reproduce AF_INET checks to make the bindings consitant */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); if (!sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) { err = -EADDRNOTAVAIL; goto out; } } else { if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; rcu_read_lock(); if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && addr->sin6_scope_id) { /* Override any existing binding, if another one * is supplied by user. */ sk->sk_bound_dev_if = addr->sin6_scope_id; } /* Binding to link-local address requires an interface */ if (!sk->sk_bound_dev_if) { err = -EINVAL; goto out_unlock; } dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; goto out_unlock; } } /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { if (!inet->transparent && !ipv6_chk_addr(net, &addr->sin6_addr, dev, 0)) { err = -EADDRNOTAVAIL; goto out_unlock; } } rcu_read_unlock(); } } inet->inet_rcv_saddr = v4addr; inet->inet_saddr = v4addr; ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); if (!(addr_type & IPV6_ADDR_MULTICAST)) ipv6_addr_copy(&np->saddr, &addr->sin6_addr); /* Make sure we are allowed to bind here. 
*/ if (sk->sk_prot->get_port(sk, snum)) { inet_reset_saddr(sk); err = -EADDRINUSE; goto out; } if (addr_type != IPV6_ADDR_ANY) { sk->sk_userlocks |= SOCK_BINDADDR_LOCK; if (addr_type != IPV6_ADDR_MAPPED) np->ipv6only = 1; } if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet->inet_sport = htons(inet->inet_num); inet->inet_dport = 0; inet->inet_daddr = 0; out: release_sock(sk); return err; out_unlock: rcu_read_unlock(); goto out; } EXPORT_SYMBOL(inet6_bind); int inet6_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk == NULL) return -EINVAL; /* Free mc lists */ ipv6_sock_mc_close(sk); /* Free ac lists */ ipv6_sock_ac_close(sk); return inet_release(sock); } EXPORT_SYMBOL(inet6_release); void inet6_destroy_sock(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; struct ipv6_txoptions *opt; /* Release rx options */ if ((skb = xchg(&np->pktoptions, NULL)) != NULL) kfree_skb(skb); if ((skb = xchg(&np->rxpmtu, NULL)) != NULL) kfree_skb(skb); /* Free flowlabels */ fl6_free_socklist(sk); /* Free tx options */ if ((opt = xchg(&np->opt, NULL)) != NULL) sock_kfree_s(sk, opt, opt->tot_len); } EXPORT_SYMBOL_GPL(inet6_destroy_sock); /* * This does both peername and sockname. 
*/ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_in6 *sin=(struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); sin->sin6_family = AF_INET6; sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; if (peer) { if (!inet->inet_dport) return -ENOTCONN; if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && peer == 1) return -ENOTCONN; sin->sin6_port = inet->inet_dport; ipv6_addr_copy(&sin->sin6_addr, &np->daddr); if (np->sndflow) sin->sin6_flowinfo = np->flow_label; } else { if (ipv6_addr_any(&np->rcv_saddr)) ipv6_addr_copy(&sin->sin6_addr, &np->saddr); else ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); sin->sin6_port = inet->inet_sport; } if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin->sin6_scope_id = sk->sk_bound_dev_if; *uaddr_len = sizeof(*sin); return 0; } EXPORT_SYMBOL(inet6_getname); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); switch(cmd) { case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *)arg); case SIOCADDRT: case SIOCDELRT: return ipv6_route_ioctl(net, cmd, (void __user *)arg); case SIOCSIFADDR: return addrconf_add_ifaddr(net, (void __user *) arg); case SIOCDIFADDR: return addrconf_del_ifaddr(net, (void __user *) arg); case SIOCSIFDSTADDR: return addrconf_set_dstaddr(net, (void __user *) arg); default: if (!sk->sk_prot->ioctl) return -ENOIOCTLCMD; return sk->sk_prot->ioctl(sk, cmd, arg); } /*NOTREACHED*/ return 0; } EXPORT_SYMBOL(inet6_ioctl); const struct proto_ops inet6_stream_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_stream_connect, /* ok */ .socketpair = sock_no_socketpair, /* a do nothing */ .accept = inet_accept, /* ok */ .getname = 
inet6_getname, .poll = tcp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = inet_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ .sendmsg = inet_sendmsg, /* ok */ .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = inet_sendpage, .splice_read = tcp_splice_read, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; const struct proto_ops inet6_dgram_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_dgram_connect, /* ok */ .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, .poll = udp_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ .sendmsg = inet_sendmsg, /* ok */ .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static const struct net_proto_family inet6_family_ops = { .family = PF_INET6, .create = inet6_create, .owner = THIS_MODULE, }; int inet6_register_protosw(struct inet_protosw *p) { struct list_head *lh; struct inet_protosw *answer; struct list_head *last_perm; int protocol = p->protocol; int ret; spin_lock_bh(&inetsw6_lock); ret = -EINVAL; if (p->type >= SOCK_MAX) goto out_illegal; /* If we are trying to override a permanent protocol, bail. */ answer = NULL; ret = -EPERM; last_perm = &inetsw6[p->type]; list_for_each(lh, &inetsw6[p->type]) { answer = list_entry(lh, struct inet_protosw, list); /* Check only the non-wild match. 
*/ if (INET_PROTOSW_PERMANENT & answer->flags) { if (protocol == answer->protocol) break; last_perm = lh; } answer = NULL; } if (answer) goto out_permanent; /* Add the new entry after the last permanent entry if any, so that * the new entry does not override a permanent entry when matched with * a wild-card protocol. But it is allowed to override any existing * non-permanent entry. This means that when we remove this entry, the * system automatically returns to the old behavior. */ list_add_rcu(&p->list, last_perm); ret = 0; out: spin_unlock_bh(&inetsw6_lock); return ret; out_permanent: printk(KERN_ERR "Attempt to override permanent protocol %d.\n", protocol); goto out; out_illegal: printk(KERN_ERR "Ignoring attempt to register invalid socket type %d.\n", p->type); goto out; } EXPORT_SYMBOL(inet6_register_protosw); void inet6_unregister_protosw(struct inet_protosw *p) { if (INET_PROTOSW_PERMANENT & p->flags) { printk(KERN_ERR "Attempt to unregister permanent protocol %d.\n", p->protocol); } else { spin_lock_bh(&inetsw6_lock); list_del_rcu(&p->list); spin_unlock_bh(&inetsw6_lock); synchronize_net(); } } EXPORT_SYMBOL(inet6_unregister_protosw); int inet6_sk_rebuild_header(struct sock *sk) { int err; struct dst_entry *dst; struct ipv6_pinfo *np = inet6_sk(sk); dst = __sk_dst_check(sk, np->dst_cookie); if (dst == NULL) { struct inet_sock *inet = inet_sk(sk); struct in6_addr *final_p, final; struct flowi fl; memset(&fl, 0, sizeof(fl)); fl.proto = sk->sk_protocol; ipv6_addr_copy(&fl.fl6_dst, &np->daddr); ipv6_addr_copy(&fl.fl6_src, &np->saddr); fl.fl6_flowlabel = np->flow_label; fl.oif = sk->sk_bound_dev_if; fl.mark = sk->sk_mark; fl.fl_ip_dport = inet->inet_dport; fl.fl_ip_sport = inet->inet_sport; security_sk_classify_flow(sk, &fl); final_p = fl6_update_dst(&fl, np->opt, &final); err = ip6_dst_lookup(sk, &dst, &fl); if (err) { sk->sk_route_caps = 0; return err; } if (final_p) ipv6_addr_copy(&fl.fl6_dst, final_p); if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) 
< 0) { sk->sk_err_soft = -err; return err; } __ip6_dst_store(sk, dst, NULL, NULL); } return 0; } EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header); int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet6_skb_parm *opt = IP6CB(skb); if (np->rxopt.all) { if ((opt->hop && (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) || ((IPV6_FLOWINFO_MASK & *(__be32 *)skb_network_header(skb)) && np->rxopt.bits.rxflow) || (opt->srcrt && (np->rxopt.bits.srcrt || np->rxopt.bits.osrcrt)) || ((opt->dst1 || opt->dst0) && (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts))) return 1; } return 0; } EXPORT_SYMBOL_GPL(ipv6_opt_accepted); static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto) { const struct inet6_protocol *ops = NULL; for (;;) { struct ipv6_opt_hdr *opth; int len; if (proto != NEXTHDR_HOP) { ops = rcu_dereference(inet6_protos[proto]); if (unlikely(!ops)) break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) break; opth = (void *)skb->data; len = ipv6_optlen(opth); if (unlikely(!pskb_may_pull(skb, len))) break; proto = opth->nexthdr; __skb_pull(skb, len); } return proto; } static int ipv6_gso_send_check(struct sk_buff *skb) { struct ipv6hdr *ipv6h; const struct inet6_protocol *ops; int err = -EINVAL; if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); err = -EPROTONOSUPPORT; rcu_read_lock(); ops = rcu_dereference(inet6_protos[ ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]); if (likely(ops && ops->gso_send_check)) { skb_reset_transport_header(skb); err = ops->gso_send_check(skb); } rcu_read_unlock(); out: return err; } static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; const struct inet6_protocol *ops; int proto; struct frag_hdr *fptr; unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; if (!(features & 
NETIF_F_V6_CSUM)) features &= ~NETIF_F_SG; if (unlikely(skb_shinfo(skb)->gso_type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 | 0))) goto out; if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; ipv6h = ipv6_hdr(skb); __skb_pull(skb, sizeof(*ipv6h)); segs = ERR_PTR(-EPROTONOSUPPORT); proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); rcu_read_lock(); ops = rcu_dereference(inet6_protos[proto]); if (likely(ops && ops->gso_segment)) { skb_reset_transport_header(skb); segs = ops->gso_segment(skb, features); } rcu_read_unlock(); if (unlikely(IS_ERR(segs))) goto out; for (skb = segs; skb; skb = skb->next) { ipv6h = ipv6_hdr(skb); ipv6h->payload_len = htons(skb->len - skb->mac_len - sizeof(*ipv6h)); if (proto == IPPROTO_UDP) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->frag_off = htons(offset); if (skb->next != NULL) fptr->frag_off |= htons(IP6_MF); offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } } out: return segs; } struct ipv6_gro_cb { struct napi_gro_cb napi; int proto; }; #define IPV6_GRO_CB(skb) ((struct ipv6_gro_cb *)(skb)->cb) static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { const struct inet6_protocol *ops; struct sk_buff **pp = NULL; struct sk_buff *p; struct ipv6hdr *iph; unsigned int nlen; unsigned int hlen; unsigned int off; int flush = 1; int proto; __wsum csum; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { iph = skb_gro_header_slow(skb, hlen, off); if (unlikely(!iph)) goto out; } skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); flush += ntohs(iph->payload_len) != skb_gro_len(skb); rcu_read_lock(); proto = iph->nexthdr; ops = rcu_dereference(inet6_protos[proto]); if (!ops || !ops->gro_receive) { __pskb_pull(skb, skb_gro_offset(skb)); proto = ipv6_gso_pull_exthdrs(skb, 
proto); skb_gro_pull(skb, -skb_transport_offset(skb)); skb_reset_transport_header(skb); __skb_push(skb, skb_gro_offset(skb)); if (!ops || !ops->gro_receive) goto out_unlock; iph = ipv6_hdr(skb); } IPV6_GRO_CB(skb)->proto = proto; flush--; nlen = skb_network_header_len(skb); for (p = *head; p; p = p->next) { struct ipv6hdr *iph2; if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = ipv6_hdr(p); /* All fields must match except length. */ if (nlen != skb_network_header_len(p) || memcmp(iph, iph2, offsetof(struct ipv6hdr, payload_len)) || memcmp(&iph->nexthdr, &iph2->nexthdr, nlen - offsetof(struct ipv6hdr, nexthdr))) { NAPI_GRO_CB(p)->same_flow = 0; continue; } NAPI_GRO_CB(p)->flush |= flush; } NAPI_GRO_CB(skb)->flush |= flush; csum = skb->csum; skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); pp = ops->gro_receive(head, skb); skb->csum = csum; out_unlock: rcu_read_unlock(); out: NAPI_GRO_CB(skb)->flush |= flush; return pp; } static int ipv6_gro_complete(struct sk_buff *skb) { const struct inet6_protocol *ops; struct ipv6hdr *iph = ipv6_hdr(skb); int err = -ENOSYS; iph->payload_len = htons(skb->len - skb_network_offset(skb) - sizeof(*iph)); rcu_read_lock(); ops = rcu_dereference(inet6_protos[IPV6_GRO_CB(skb)->proto]); if (WARN_ON(!ops || !ops->gro_complete)) goto out_unlock; err = ops->gro_complete(skb); out_unlock: rcu_read_unlock(); return err; } static struct packet_type ipv6_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IPV6), .func = ipv6_rcv, .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, .gro_receive = ipv6_gro_receive, .gro_complete = ipv6_gro_complete, }; static int __init ipv6_packet_init(void) { dev_add_pack(&ipv6_packet_type); return 0; } static void ipv6_packet_cleanup(void) { dev_remove_pack(&ipv6_packet_type); } static int __net_init ipv6_init_mibs(struct net *net) { if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, sizeof(struct udp_mib), __alignof__(struct udp_mib)) < 0) return -ENOMEM; if 
(snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, sizeof(struct udp_mib), __alignof__(struct udp_mib)) < 0) goto err_udplite_mib; if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, sizeof(struct ipstats_mib), __alignof__(struct ipstats_mib)) < 0) goto err_ip_mib; if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, sizeof(struct icmpv6_mib), __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, sizeof(struct icmpv6msg_mib), __alignof__(struct icmpv6msg_mib)) < 0) goto err_icmpmsg_mib; return 0; err_icmpmsg_mib: snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); err_icmp_mib: snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); err_ip_mib: snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); err_udplite_mib: snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); return -ENOMEM; } static void ipv6_cleanup_mibs(struct net *net) { snmp_mib_free((void __percpu **)net->mib.udp_stats_in6); snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics); } static int __net_init inet6_net_init(struct net *net) { int err = 0; net->ipv6.sysctl.bindv6only = 0; net->ipv6.sysctl.icmpv6_time = 1*HZ; err = ipv6_init_mibs(net); if (err) return err; #ifdef CONFIG_PROC_FS err = udp6_proc_init(net); if (err) goto out; err = tcp6_proc_init(net); if (err) goto proc_tcp6_fail; err = ac6_proc_init(net); if (err) goto proc_ac6_fail; #endif return err; #ifdef CONFIG_PROC_FS proc_ac6_fail: tcp6_proc_exit(net); proc_tcp6_fail: udp6_proc_exit(net); out: ipv6_cleanup_mibs(net); return err; #endif } static void __net_exit inet6_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS udp6_proc_exit(net); tcp6_proc_exit(net); ac6_proc_exit(net); #endif ipv6_cleanup_mibs(net); } static struct 
pernet_operations inet6_net_ops = { .init = inet6_net_init, .exit = inet6_net_exit, }; static int __init inet6_init(void) { struct sk_buff *dummy_skb; struct list_head *r; int err = 0; BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)); /* Register the socket-side information for inet6_create. */ for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { printk(KERN_INFO "IPv6: Loaded, but administratively disabled, " "reboot required to enable\n"); goto out; } err = proto_register(&tcpv6_prot, 1); if (err) goto out; err = proto_register(&udpv6_prot, 1); if (err) goto out_unregister_tcp_proto; err = proto_register(&udplitev6_prot, 1); if (err) goto out_unregister_udp_proto; err = proto_register(&rawv6_prot, 1); if (err) goto out_unregister_udplite_proto; /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. */ err = rawv6_init(); if (err) goto out_unregister_raw_proto; /* Register the family here so that the init calls below will * be able to create sockets. (?? is this dangerous ??) */ err = sock_register(&inet6_family_ops); if (err) goto out_sock_register_fail; #ifdef CONFIG_SYSCTL err = ipv6_static_sysctl_register(); if (err) goto static_sysctl_fail; #endif /* * ipngwg API draft makes clear that the correct semantics * for TCP and UDP is to consider one TCP and UDP instance * in a host availiable by both INET and INET6 APIs and * able to communicate via both network protocols. */ err = register_pernet_subsys(&inet6_net_ops); if (err) goto register_pernet_fail; err = icmpv6_init(); if (err) goto icmp_fail; err = ip6_mr_init(); if (err) goto ipmr_fail; err = ndisc_init(); if (err) goto ndisc_fail; err = igmp6_init(); if (err) goto igmp_fail; err = ipv6_netfilter_init(); if (err) goto netfilter_fail; /* Create /proc/foo6 entries. 
*/ #ifdef CONFIG_PROC_FS err = -ENOMEM; if (raw6_proc_init()) goto proc_raw6_fail; if (udplite6_proc_init()) goto proc_udplite6_fail; if (ipv6_misc_proc_init()) goto proc_misc6_fail; if (if6_proc_init()) goto proc_if6_fail; #endif err = ip6_route_init(); if (err) goto ip6_route_fail; err = ip6_flowlabel_init(); if (err) goto ip6_flowlabel_fail; err = addrconf_init(); if (err) goto addrconf_fail; /* Init v6 extension headers. */ err = ipv6_exthdrs_init(); if (err) goto ipv6_exthdrs_fail; err = ipv6_frag_init(); if (err) goto ipv6_frag_fail; /* Init v6 transport protocols. */ err = udpv6_init(); if (err) goto udpv6_fail; err = udplitev6_init(); if (err) goto udplitev6_fail; err = tcpv6_init(); if (err) goto tcpv6_fail; err = ipv6_packet_init(); if (err) goto ipv6_packet_fail; #ifdef CONFIG_SYSCTL err = ipv6_sysctl_register(); if (err) goto sysctl_fail; #endif out: return err; #ifdef CONFIG_SYSCTL sysctl_fail: ipv6_packet_cleanup(); #endif ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: udplitev6_exit(); udplitev6_fail: udpv6_exit(); udpv6_fail: ipv6_frag_exit(); ipv6_frag_fail: ipv6_exthdrs_exit(); ipv6_exthdrs_fail: addrconf_cleanup(); addrconf_fail: ip6_flowlabel_cleanup(); ip6_flowlabel_fail: ip6_route_cleanup(); ip6_route_fail: #ifdef CONFIG_PROC_FS if6_proc_exit(); proc_if6_fail: ipv6_misc_proc_exit(); proc_misc6_fail: udplite6_proc_exit(); proc_udplite6_fail: raw6_proc_exit(); proc_raw6_fail: #endif ipv6_netfilter_fini(); netfilter_fail: igmp6_cleanup(); igmp_fail: ndisc_cleanup(); ndisc_fail: ip6_mr_cleanup(); ipmr_fail: icmpv6_cleanup(); icmp_fail: unregister_pernet_subsys(&inet6_net_ops); register_pernet_fail: #ifdef CONFIG_SYSCTL ipv6_static_sysctl_unregister(); static_sysctl_fail: #endif sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); out_sock_register_fail: rawv6_exit(); out_unregister_raw_proto: proto_unregister(&rawv6_prot); out_unregister_udplite_proto: proto_unregister(&udplitev6_prot); out_unregister_udp_proto: proto_unregister(&udpv6_prot); 
out_unregister_tcp_proto: proto_unregister(&tcpv6_prot); goto out; } module_init(inet6_init); static void __exit inet6_exit(void) { if (disable_ipv6_mod) return; /* First of all disallow new sockets creation. */ sock_unregister(PF_INET6); /* Disallow any further netlink messages */ rtnl_unregister_all(PF_INET6); #ifdef CONFIG_SYSCTL ipv6_sysctl_unregister(); #endif udpv6_exit(); udplitev6_exit(); tcpv6_exit(); /* Cleanup code parts. */ ipv6_packet_cleanup(); ipv6_frag_exit(); ipv6_exthdrs_exit(); addrconf_cleanup(); ip6_flowlabel_cleanup(); ip6_route_cleanup(); #ifdef CONFIG_PROC_FS /* Cleanup code parts. */ if6_proc_exit(); ipv6_misc_proc_exit(); udplite6_proc_exit(); raw6_proc_exit(); #endif ipv6_netfilter_fini(); igmp6_cleanup(); ndisc_cleanup(); ip6_mr_cleanup(); icmpv6_cleanup(); rawv6_exit(); unregister_pernet_subsys(&inet6_net_ops); #ifdef CONFIG_SYSCTL ipv6_static_sysctl_unregister(); #endif proto_unregister(&rawv6_prot); proto_unregister(&udplitev6_prot); proto_unregister(&udpv6_prot); proto_unregister(&tcpv6_prot); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } module_exit(inet6_exit); MODULE_ALIAS_NETPROTO(PF_INET6);
gpl-2.0
Evil-Green/Ptah-GT-I9300
drivers/misc/modem_if/modem_modemctl_device_esc6270.c
38
8604
/* /linux/drivers/misc/modem_if/modem_modemctl_device_esc6270.c * * Copyright (C) 2010 Google, Inc. * Copyright (C) 2010 Samsung Electronics. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/platform_data/modem.h> #include "modem_prj.h" #include <linux/regulator/consumer.h> #include <plat/gpio-cfg.h> #if defined(CONFIG_LINK_DEVICE_DPRAM) #include "modem_link_device_dpram.h" #elif defined(CONFIG_LINK_DEVICE_PLD) #include "modem_link_device_pld.h" #endif #if defined(CONFIG_LINK_DEVICE_DPRAM) || defined(CONFIG_LINK_DEVICE_PLD) #include <linux/mfd/max77693.h> #define PIF_TIMEOUT (180 * HZ) #define DPRAM_INIT_TIMEOUT (30 * HZ) static int esc6270_on(struct modem_ctl *mc) { int ret; struct link_device *ld = get_current_link(mc->iod); pr_info("[MODEM_IF:ESC] <%s> start!!!\n", __func__); if (!mc->gpio_cp_reset) { pr_err("[MODEM_IF:ESC] no gpio data\n"); return -ENXIO; } if (mc->gpio_reset_req_n) gpio_set_value(mc->gpio_reset_req_n, 1); gpio_set_value(mc->gpio_cp_reset, 1); msleep(30); gpio_set_value(mc->gpio_cp_on, 1); msleep(500); gpio_set_value(mc->gpio_cp_on, 0); msleep(500); gpio_set_value(mc->gpio_pda_active, 1); mc->iod->modem_state_changed(mc->iod, STATE_BOOTING); ld->mode = LINK_MODE_BOOT; return 0; } static int esc6270_off(struct modem_ctl *mc) { pr_info("[MODEM_IF:ESC] esc6270_off()\n"); #if 1 if (!mc->gpio_cp_reset) { pr_err("[MODEM_IF:ESC] no 
gpio data\n"); return -ENXIO; } gpio_set_value(mc->gpio_cp_reset, 0); gpio_set_value(mc->gpio_cp_on, 0); #endif mc->iod->modem_state_changed(mc->iod, STATE_OFFLINE); return 0; } static int esc6270_reset(struct modem_ctl *mc) { int ret = 0; pr_debug("[MODEM_IF:ESC] esc6270_reset()\n"); ret = esc6270_off(mc); if (ret) return -ENXIO; msleep(100); ret = esc6270_on(mc); if (ret) return -ENXIO; return 0; } int esc6270_boot_on(struct modem_ctl *mc) { struct link_device *ld = get_current_link(mc->iod); pr_info("[MODEM_IF:ESC] <%s>\n", __func__); /* Need to init uart byt gpio_flm_uart_sel GPIO */ if (!mc->gpio_cp_reset || !mc->gpio_flm_uart_sel) { pr_err("[MODEM_IF:ESC] no gpio data\n"); return -ENXIO; } gpio_set_value(mc->gpio_flm_uart_sel, 1); pr_info(" - ESC_PHONE_ON : %d, ESC_RESET_N : %d\n", gpio_get_value(mc->gpio_cp_on), gpio_get_value(mc->gpio_cp_reset)); gpio_set_value(mc->gpio_cp_on, 0); gpio_direction_output(mc->gpio_cp_reset, 0); msleep(100); gpio_direction_output(mc->gpio_cp_on, 1); msleep(44); pr_info(" - ESC_PHONE_ON : %d, ESC_RESET_N : %d\n", gpio_get_value(mc->gpio_cp_on), gpio_get_value(mc->gpio_cp_reset)); gpio_direction_input(mc->gpio_cp_reset); msleep(600); gpio_direction_output(mc->gpio_cp_on, 0); msleep(20); pr_info(" - ESC_PHONE_ON : %d, ESC_RESET_N : %d\n", gpio_get_value(mc->gpio_cp_on), gpio_get_value(mc->gpio_cp_reset)); #if defined(CONFIG_LINK_DEVICE_PLD) gpio_direction_output(mc->gpio_fpga_cs_n, 1); #endif mc->iod->modem_state_changed(mc->iod, STATE_BOOTING); ld->mode = LINK_MODE_BOOT; return 0; } static int esc6270_boot_off(struct modem_ctl *mc) { pr_info("[MODEM_IF:ESC] <%s>\n", __func__); if (!mc->gpio_flm_uart_sel) { pr_err("[MODEM_IF:ESC] no gpio data\n"); return -ENXIO; } gpio_set_value(mc->gpio_flm_uart_sel, 0); mc->iod->modem_state_changed(mc->iod, STATE_OFFLINE); return 0; } static int esc6270_active_count; static irqreturn_t phone_active_irq_handler(int irq, void *arg) { struct modem_ctl *mc = (struct modem_ctl *)arg; int phone_reset 
= 0; int phone_active = 0; int phone_state = 0; int cp_dump_int = 0; if (!mc->gpio_cp_reset || !mc->gpio_phone_active) { /* || !mc->gpio_cp_dump_int) { */ pr_err("[MODEM_IF:ESC] no gpio data\n"); return IRQ_HANDLED; } phone_reset = gpio_get_value(mc->gpio_cp_reset); phone_active = gpio_get_value(mc->gpio_phone_active); cp_dump_int = gpio_get_value(mc->gpio_cp_dump_int); pr_info("[MODEM_IF:ESC] <%s> phone_reset=%d, phone_active=%d, cp_dump_int=%d\n", __func__, phone_reset, phone_active, cp_dump_int); if (phone_reset && phone_active) { phone_state = STATE_ONLINE; if (mc->iod && mc->iod->modem_state_changed) mc->iod->modem_state_changed(mc->iod, phone_state); } else if (phone_reset && !phone_active) { if (mc->phone_state == STATE_ONLINE) { if (cp_dump_int) phone_state = STATE_CRASH_EXIT; else phone_state = STATE_CRASH_RESET; if (mc->iod && mc->iod->modem_state_changed) mc->iod->modem_state_changed(mc->iod, phone_state); } } else { phone_state = STATE_OFFLINE; if (mc->iod && mc->iod->modem_state_changed) mc->iod->modem_state_changed(mc->iod, phone_state); } if (phone_active) irq_set_irq_type(mc->irq_phone_active, IRQ_TYPE_LEVEL_LOW); else irq_set_irq_type(mc->irq_phone_active, IRQ_TYPE_LEVEL_HIGH); pr_info("[MODEM_IF::ESC] <%s> phone_state = %d\n", __func__, phone_state); return IRQ_HANDLED; } #if defined(CONFIG_SIM_DETECT) static irqreturn_t sim_detect_irq_handler(int irq, void *_mc) { struct modem_ctl *mc = (struct modem_ctl *)_mc; pr_info("[MODEM_IF:ESC] <%s> gpio_sim_detect = %d\n", __func__, gpio_get_value(mc->gpio_sim_detect)); if (mc->iod && mc->iod->sim_state_changed) mc->iod->sim_state_changed(mc->iod, !gpio_get_value(mc->gpio_sim_detect)); return IRQ_HANDLED; } #endif static void esc6270_get_ops(struct modem_ctl *mc) { mc->ops.modem_on = esc6270_on; mc->ops.modem_off = esc6270_off; mc->ops.modem_reset = esc6270_reset; mc->ops.modem_boot_on = esc6270_boot_on; mc->ops.modem_boot_off = esc6270_boot_off; } int esc6270_init_modemctl_device(struct modem_ctl *mc, 
struct modem_data *pdata) { int ret = 0; struct platform_device *pdev; mc->gpio_cp_on = pdata->gpio_cp_on; mc->gpio_reset_req_n = pdata->gpio_reset_req_n; mc->gpio_cp_reset = pdata->gpio_cp_reset; mc->gpio_pda_active = pdata->gpio_pda_active; mc->gpio_phone_active = pdata->gpio_phone_active; mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int; mc->gpio_flm_uart_sel = pdata->gpio_flm_uart_sel; mc->gpio_cp_warm_reset = pdata->gpio_cp_warm_reset; mc->gpio_sim_detect = pdata->gpio_sim_detect; #if defined(CONFIG_LINK_DEVICE_PLD) mc->gpio_fpga_cs_n = pdata->gpio_fpga1_cs_n; #endif gpio_set_value(mc->gpio_cp_reset, 0); gpio_set_value(mc->gpio_cp_on, 0); pdev = to_platform_device(mc->dev); mc->irq_phone_active = platform_get_irq_byname(pdev, "cp_active_irq"); pr_info("[MODEM_IF:ESC] <%s> PHONE_ACTIVE IRQ# = %d\n", __func__, mc->irq_phone_active); esc6270_get_ops(mc); if (mc->irq_phone_active) { ret = request_irq(mc->irq_phone_active, phone_active_irq_handler, IRQF_TRIGGER_HIGH, "esc_active", mc); if (ret) { pr_err("[MODEM_IF:ESC] <%s> failed to request_irq IRQ# %d (err=%d)\n", __func__, mc->irq_phone_active, ret); return ret; } ret = enable_irq_wake(mc->irq_phone_active); if (ret) { pr_err("[MODEM_IF:ESC] %s: failed to enable_irq_wake IRQ# %d (err=%d)\n", __func__, mc->irq_phone_active, ret); free_irq(mc->irq_phone_active, mc); return ret; } } #if defined(CONFIG_SIM_DETECT) mc->irq_sim_detect = platform_get_irq_byname(pdev, "sim_irq"); pr_info("[MODEM_IF:ESC] <%s> SIM_DECTCT IRQ# = %d\n", __func__, mc->irq_sim_detect); if (mc->irq_sim_detect) { ret = request_irq(mc->irq_sim_detect, sim_detect_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "esc_sim_detect", mc); if (ret) { mif_err("failed to request_irq: %d\n", ret); mc->sim_state.online = false; mc->sim_state.changed = false; return ret; } ret = enable_irq_wake(mc->irq_sim_detect); if (ret) { mif_err("failed to enable_irq_wake: %d\n", ret); free_irq(mc->irq_sim_detect, mc); mc->sim_state.online = false; 
mc->sim_state.changed = false; return ret; } /* initialize sim_state => insert: gpio=0, remove: gpio=1 */ mc->sim_state.online = !gpio_get_value(mc->gpio_sim_detect); } #endif return ret; } #endif
gpl-2.0
visi0nary/mediatek
mt6592/mediatek/platform/mt6582/kernel/drivers/ldvt/hdmitx/tmbslTDA9989_Edid.c
38
57098
/** * Copyright (C) 2009 NXP N.V., All Rights Reserved. * This source code and any compilation or derivative thereof is the proprietary * information of NXP N.V. and is confidential in nature. Under no circumstances * is this software to be exposed to or placed under an Open Source License of * any type without the expressed written permission of NXP N.V. * * \file tmbslTDA9989_edid.c * * \version $Revision: 2 $ * */ /*============================================================================*/ /* INCLUDE FILES */ /*============================================================================*/ #include "tmbslHdmiTx_types.h" #include "tmbslTDA9989_Functions.h" #include "tmbslTDA9989_local.h" #include "tmbslTDA9989_State_l.h" #include "tmbslTDA9989_Edid_l.h" #define TMFL_TDA19989 #define TMFL_NO_RTOS #define TMFL_LINUX_OS_KERNEL_DRIVER /*============================================================================*/ /* TYPES DECLARATIONS */ /*============================================================================*/ /*============================================================================*/ /* CONSTANTS DECLARATIONS */ /*============================================================================*/ #define EDID_NUMBER_MAX_DTD_BLK_1 6 /** EDID block 0 parse start point */ #define EDID_BLK0_BASE_DTD 0x36 #define EDID_BLK1_OFFSET_BASE_DTD 2 /** EDID block 0 extension block count */ #define EDID_BLK0_EXT_CNT 0x7E /** EDID extension block parse start point */ #define EDID_BLK_EXT_BASE 0x04 /** CEA extension block type */ #define EDID_CEA_EXTENSION 0x02 /** CEA Block Map */ #define EDID_BLOCK_MAP 0xF0 /** NB Max of descriptor DTD or monitor in block 0 */ #define EDID_NB_MAX_DESCRIP_BLK_IN_BLK_0 4 #define EDID_MONITOR_NAME_DESC_DATA_TYPE 252 #define EDID_MONITOR_RANGE_DESC_DATA_TYPE 253 /*============================================================================*/ /* DEFINES DECLARATIONS */ 
/*============================================================================*/ /*============================================================================*/ /* VARIABLES DECLARATIONS */ /*============================================================================*/ /*============================================================================*/ /* FUNCTION PROTOTYPES */ /*============================================================================*/ static tmErrorCode_t requestEdidBlock(tmHdmiTxobject_t *pDis); static tmErrorCode_t parseEdidBlock (tmHdmiTxobject_t *pDis, Int blockNumber); static Bool storeDtdBlock (tmHdmiTxobject_t *pDis, UInt8 blockPtr); static Bool storeMonitorDescriptor (tmHdmiTxobject_t *pDis, UInt8 blockPtr); /*============================================================================*/ /* tmbslTDA9989HwGetCapabilities */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989HwGetCapabilities ( tmUnitSelect_t txUnit, tmbslHdmiTxHwFeature_t deviceCapability, Bool *pFeatureSupported ) { tmHdmiTxobject_t *pDis; tmErrorCode_t err = TM_OK; /* Check unit parameter and point to its object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) RETIF_BADPARAM(pFeatureSupported == Null) *pFeatureSupported = False; switch (deviceCapability) { case HDMITX_FEATURE_HW_HDCP: if((pDis->uDeviceFeatures & E_MASKREG_P00_VERSION_not_h) == 0) { *pFeatureSupported = True; } break; case HDMITX_FEATURE_HW_SCALER: if((pDis->uDeviceFeatures & E_MASKREG_P00_VERSION_not_s) == 0) { *pFeatureSupported = True; } break; case HDMITX_FEATURE_HW_AUDIO_OBA: *pFeatureSupported = True; break; case HDMITX_FEATURE_HW_AUDIO_DST: *pFeatureSupported = False; break; case HDMITX_FEATURE_HW_AUDIO_HBR: *pFeatureSupported = False; break; case HDMITX_FEATURE_HW_HDMI_1_1: *pFeatureSupported = True; break; case HDMITX_FEATURE_HW_HDMI_1_2A: *pFeatureSupported = True; break; case HDMITX_FEATURE_HW_HDMI_1_3A: 
*pFeatureSupported = False; break; case HDMITX_FEATURE_HW_DEEP_COLOR_30: *pFeatureSupported = False; break; case HDMITX_FEATURE_HW_DEEP_COLOR_36: *pFeatureSupported = False; break; case HDMITX_FEATURE_HW_DEEP_COLOR_48: *pFeatureSupported = False; break; case HDMITX_FEATURE_HW_UPSAMPLER: *pFeatureSupported = True; break; case HDMITX_FEATURE_HW_DOWNSAMPLER: *pFeatureSupported = True; break; case HDMITX_FEATURE_HW_COLOR_CONVERSION: *pFeatureSupported = True; break; default: *pFeatureSupported = False; break; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetAudioCapabilities */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetAudioCapabilities ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidSad_t *pEdidAFmts, UInt aFmtLength, UInt *pAFmtsAvail, UInt8 *pAudioFlags ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ UInt i; /* Loop index */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidAFmts == Null) RETIF_BADPARAM(aFmtLength < 1) RETIF_BADPARAM(pAFmtsAvail == Null) RETIF_BADPARAM(pAudioFlags == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ /* Copy the Device Instance Structure EdidAFmts descriptors to * pEdidAFmts until we run out or no more space in structure. 
*/ if (pDis->EdidSadCnt > 0) { for (i = 0; (i < (UInt)pDis->EdidSadCnt) && (i < aFmtLength); i++) { pEdidAFmts[i].ModeChans = pDis->EdidAFmts[i].ModeChans; pEdidAFmts[i].Freqs = pDis->EdidAFmts[i].Freqs; pEdidAFmts[i].Byte3 = pDis->EdidAFmts[i].Byte3; } } else { /* No pEdidAFmts to copy so set a zero format to be safe */ pEdidAFmts[0].ModeChans = 0; pEdidAFmts[0].Freqs = 0; pEdidAFmts[0].Byte3 = 0; } /* Fill Audio Flags parameter */ *pAudioFlags = ((pDis->EdidCeaFlags & 0x40) << 1); /* Basic audio */ if (pDis->EdidSinkAi == True) { *pAudioFlags += 0x40; /* Mask in AI support */ } /* Fill number of SADs available parameter */ *pAFmtsAvail = pDis->EdidSadCnt; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetBlockCount */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetBlockCount ( tmUnitSelect_t txUnit, UInt8 *puEdidBlockCount ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(puEdidBlockCount == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ *puEdidBlockCount = pDis->EdidBlockCnt; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetStatus */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetStatus ( tmUnitSelect_t txUnit, UInt8 *puEdidStatus 
) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(puEdidStatus == Null) if (puEdidStatus) { *puEdidStatus = pDis->EdidStatus; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidRequestBlockData */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidRequestBlockData ( tmUnitSelect_t txUnit, UInt8 *pRawEdid, Int numBlocks, /* Only relevant if pRawEdid valid */ Int lenRawEdid /* Only relevant if pRawEdid valid */ ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ UInt8 regval; /* Byte value write to register */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) #ifdef TMFL_TDA9989_PIXEL_CLOCK_ON_DDC if ( (pDis->vinFmt == HDMITX_VFMT_16_1920x1080p_60Hz) || (pDis->vinFmt == HDMITX_VFMT_31_1920x1080p_50Hz)) { err = setHwRegisterField(pDis, E_REG_P02_PLL_SERIAL_3_RW, E_MASKREG_P02_PLL_SERIAL_3_srl_ccir, 0x01); RETIF_REG_FAIL(err) } #endif /* TMFL_TDA9989_PIXEL_CLOCK_ON_DDC */ #ifdef TMFL_RGB_DDR_12BITS /* RAM on */ setHwRegisterField(pDis, E_REG_P12_TX4_RW, E_MASKREG_P12_TX4_pd_ram, 0); #endif /* enable edid read */ err = setHwRegister(pDis, E_REG_P00_INT_FLAGS_2_RW, E_MASKREG_P00_INT_FLAGS_2_edid_blk_rd); /* Check remaining parameter(s) * We do allow a null pRawEdid pointer, in which case buffer length is * irrelevant. If pRawEdid pointer is valid, there is no point in * continuing if insufficient space for at least one block. */ RETIF_BADPARAM((pRawEdid != Null) && (lenRawEdid < EDID_BLOCK_SIZE)) /* Sensible value of numBlocks? 
*/ RETIF((pRawEdid != Null) && ((numBlocks < 1) || (numBlocks > 255)), TMBSL_ERR_HDMI_INCONSISTENT_PARAMS) /* Enough space for the data requested? */ RETIF((pRawEdid != Null) && (lenRawEdid < (numBlocks * EDID_BLOCK_SIZE)), TMBSL_ERR_HDMI_INCONSISTENT_PARAMS) /* Read the HPD pin via the hpd_in flag in the first interrupt status * register and return a TMBSL_ERR_HDMI_NULL_CONNECTION error if it is * not set. * We must use the flag in the Device Instance Structure to avoid * clearing pending interrupt flags. */ RETIF(pDis->hotPlugStatus != HDMITX_HOTPLUG_ACTIVE, TMBSL_ERR_HDMI_NULL_CONNECTION) if (pDis->EdidReadStarted == False) { /* Reset the EdidStatus in the Device Instance Structure */ pDis->EdidStatus = HDMITX_EDID_NOT_READ; pDis->EdidReadStarted = True; /* Reset stored parameters from EDID in the Device Instance Structure */ pDis->EdidSinkType = HDMITX_SINK_DVI; pDis->EdidSinkAi = False; pDis->EdidCeaFlags = 0; pDis->EdidCeaXVYCCFlags = 0; pDis->EdidSvdCnt = 0; pDis->EdidSadCnt = 0; pDis->EdidSourceAddress = 0; /* 0.0.0.0 */ pDis->NbDTDStored = 0; pDis->EdidFirstMonitorDescriptor.bDescRecord = False; pDis->EdidSecondMonitorDescriptor.bDescRecord = False; pDis->EdidOtherMonitorDescriptor.bDescRecord = False; pDis->EdidLatency.latency_available = False; pDis->EdidLatency.Ilatency_available = False; pDis->EdidExtraVsdbData.hdmiVideoPresent = False; pDis->EdidToApp.pRawEdid = pRawEdid; pDis->EdidToApp.numBlocks = numBlocks; /* Enable the T0 interrupt for detecting the Read_EDID failure */ regval = E_MASKREG_P00_INT_FLAGS_0_t0 ; err = setHwRegister(pDis, E_REG_P00_INT_FLAGS_0_RW, regval); RETIF(err != TM_OK, err); /* Launch the read of first EDID block into Device Instance workspace */ pDis->EdidBlockRequested = 0; err = requestEdidBlock(pDis); } else { /* Not allowed if read edid is on going */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* EdidBlockAvailable */ 
/*============================================================================*/ tmErrorCode_t EdidBlockAvailable (tmUnitSelect_t txUnit, Bool * pSendEDIDCallback) { tmErrorCode_t err; /* Error code */ UInt8 chksum; /* Checksum value */ UInt8 LoopIndex; /* Loop index */ UInt8 extBlockCnt; tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ err = TM_OK; /* Check remaining parameter(s) */ RETIF_BADPARAM(pSendEDIDCallback == Null) /* Check unit parameter and point to its object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) if (pDis->EdidReadStarted == True) { err = tmbslTDA9989HwGetRegisters(txUnit, kPageIndexToPage[E_PAGE_09], SPA2ADDR(E_REG_P09_EDID_DATA_0_R), pDis->EdidBlock, EDID_BLOCK_SIZE); RETIF(err != TM_OK, err) if(pSendEDIDCallback) { *pSendEDIDCallback = False; } if (pDis->EdidStatus == HDMITX_EDID_NOT_READ) { err = getHwRegisters(pDis, E_REG_P09_EDID_DATA_0_R, pDis->EdidBlock, EDID_BLOCK_SIZE); RETIF_REG_FAIL(err) /* Add up all the values of the EDID block bytes, including the * checksum byte */ chksum = 0; for (LoopIndex = 0; LoopIndex < EDID_BLOCK_SIZE; LoopIndex++) { chksum = chksum + pDis->EdidBlock[LoopIndex]; } /* IF the EDID block does not yield a checksum of zero */ if(chksum != 0) { if (pDis->EdidBlockRequested == 0) { /* THEN return a HDMITX_EDID_ERROR error.*/ pDis->EdidStatus = HDMITX_EDID_ERROR_CHK_BLOCK_0; } else { /* THEN return a HDMITX_EDID_ERROR_CHK error.*/ pDis->EdidStatus = HDMITX_EDID_ERROR_CHK; } } } if (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK_BLOCK_0) { /* PR11 : On i2c error or bad checksum in block 0 */ /* allow driver to go in state CONNECTED */ /* On the other block, we also accept INVALID_CHECKSUM which means * there was a checksum error */ if(pSendEDIDCallback) { *pSendEDIDCallback = True; } setState(pDis, EV_GETBLOCKDATA); if (pDis->rxSenseStatus == HDMITX_RX_SENSE_ACTIVE) { setState(pDis, EV_SINKON); } pDis->EdidReadStarted = False; return err; } /* Check if block 0 */ if 
(pDis->EdidBlockRequested == 0) { /* Could check block 0 header (0x00,6 x 0xFF,0x00) here but not * certain to be future proof [CEA861C A.2.3] */ /* Read block count from penultimate byte of block and store in DIS */ extBlockCnt = pDis->EdidBlock[EDID_BLK0_EXT_CNT]; pDis->EdidBlockCnt = extBlockCnt + 1; /* Total = Block 0 + extensions */ } /* If pointer was supplied, copy block from DIS to buffer */ if (pDis->EdidToApp.pRawEdid != Null) { /* Check if we've copied as many as requested yet? */ if (pDis->EdidBlockRequested < pDis->EdidToApp.numBlocks) { lmemcpy(pDis->EdidToApp.pRawEdid + (pDis->EdidBlockRequested * EDID_BLOCK_SIZE), pDis->EdidBlock, EDID_BLOCK_SIZE); } } parseEdidBlock(pDis, pDis->EdidBlockRequested); /* If extension blocks are present, process them */ if ( (pDis->EdidBlockRequested + 1) < pDis->EdidBlockCnt) { pDis->EdidBlockRequested = pDis->EdidBlockRequested + 1; /* Launch an edid block read */ err = requestEdidBlock(pDis); } else { if (pDis->EdidStatus == HDMITX_EDID_NOT_READ) { pDis->EdidStatus = HDMITX_EDID_READ; #ifdef TMFL_RGB_DDR_12BITS /* RAM off */ setHwRegisterField(pDis, E_REG_P12_TX4_RW, E_MASKREG_P12_TX4_pd_ram, 1); #endif } if(pSendEDIDCallback) { *pSendEDIDCallback = True; } setState(pDis, EV_GETBLOCKDATA); if (pDis->rxSenseStatus == HDMITX_RX_SENSE_ACTIVE) { setState(pDis, EV_SINKON); } pDis->EdidReadStarted = False; } } else { /* function called in an invalid state */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* ClearEdidRequest */ /*============================================================================*/ tmErrorCode_t ClearEdidRequest (tmUnitSelect_t txUnit) { tmErrorCode_t err; /* Error code */ tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ err = TM_OK; /* Check unit parameter and point to its object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Reset the EdidStatus in the Device 
Instance Structure */ pDis->EdidStatus = HDMITX_EDID_NOT_READ; pDis->EdidReadStarted = False; /* Reset stored parameters from EDID in the Device Instance Structure */ pDis->EdidSinkType = HDMITX_SINK_DVI; pDis->EdidSinkAi = False; pDis->EdidCeaFlags = 0; pDis->EdidCeaXVYCCFlags = 0; pDis->EdidSvdCnt = 0; pDis->EdidSadCnt = 0; pDis->EdidSourceAddress = 0; /* 0.0.0.0 */ pDis->NbDTDStored = 0; pDis->EdidFirstMonitorDescriptor.bDescRecord = False; pDis->EdidSecondMonitorDescriptor.bDescRecord = False; pDis->EdidOtherMonitorDescriptor.bDescRecord = False; pDis->EdidLatency.latency_available = False; pDis->EdidLatency.Ilatency_available = False; pDis->EdidExtraVsdbData.hdmiVideoPresent = False; /* Launch the read of first EDID block into Device Instance workspace */ pDis->EdidBlockRequested = 0; return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetSinkType */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetSinkType ( tmUnitSelect_t txUnit, tmbslHdmiTxSinkType_t *pSinkType ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pSinkType == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ *pSinkType = pDis->EdidSinkType; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetSourceAddress */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetSourceAddress ( 
tmUnitSelect_t txUnit, UInt16 *pSourceAddress ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pSourceAddress == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ *pSourceAddress = pDis->EdidSourceAddress; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetDetailedTimingDescriptors */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetDetailedTimingDescriptors ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidDtd_t *pEdidDTD, UInt8 nb_size, UInt8 *pDTDAvail ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidDTD == Null) RETIF_BADPARAM(pDTDAvail == Null) RETIF_BADPARAM(nb_size == 0) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ if (nb_size > pDis->NbDTDStored) { *pDTDAvail = pDis->NbDTDStored; } else { *pDTDAvail = nb_size; } lmemcpy(pEdidDTD, pDis->EdidDTD, sizeof(tmbslHdmiTxEdidDtd_t) * (*pDTDAvail)); } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return TM_OK; } /*============================================================================*/ /* 
tmbslTDA9989EdidGetMonitorDescriptors */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetMonitorDescriptors ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidFirstMD_t *pEdidFirstMD, tmbslHdmiTxEdidSecondMD_t *pEdidSecondMD, tmbslHdmiTxEdidOtherMD_t *pEdidOtherMD, UInt8 sizeOtherMD, UInt8 *pOtherMDAvail ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidFirstMD == Null) RETIF_BADPARAM(pEdidSecondMD == Null) RETIF_BADPARAM(pEdidOtherMD == Null) DUMMY_ACCESS(pOtherMDAvail); DUMMY_ACCESS(sizeOtherMD); if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { *pOtherMDAvail = 1; lmemcpy(pEdidFirstMD, &(pDis->EdidFirstMonitorDescriptor), sizeof(tmbslHdmiTxEdidFirstMD_t)); lmemcpy(pEdidSecondMD, &(pDis->EdidSecondMonitorDescriptor), sizeof(tmbslHdmiTxEdidSecondMD_t)); lmemcpy(pEdidOtherMD, &(pDis->EdidOtherMonitorDescriptor), sizeof(tmbslHdmiTxEdidOtherMD_t)); } else { /* Not allowed if EdidStatus value is not valid */ *pOtherMDAvail = 0; err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return TM_OK; } /*============================================================================*/ /* tmbslTDA9989EdidGetBasicDisplayParam */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetBasicDisplayParam ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidBDParam_t *pEdidBDParam ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidBDParam == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || 
(pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { lmemcpy(pEdidBDParam, &(pDis->EDIDBasicDisplayParam), sizeof(tmbslHdmiTxEdidBDParam_t)); } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return TM_OK; } /*============================================================================*/ /* tmbslTDA9989EdidGetVideoCapabilities */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetVideoCapabilities ( tmUnitSelect_t txUnit, UInt8 *pEdidVFmts, UInt vFmtLength, UInt *pVFmtsAvail, UInt8 *pVidFlags ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ UInt i; /* Loop index */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidVFmts == Null) RETIF_BADPARAM(vFmtLength < 1) RETIF_BADPARAM(pVFmtsAvail == Null) RETIF_BADPARAM(pVidFlags == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ /* Copy the Device Instance Structure EdidVFmts descriptors to * pEdidVFmts until we run out or no more space in structure. 
*/ if (pDis->EdidSvdCnt > 0) { for (i = 0; (i < (UInt)pDis->EdidSvdCnt) && (i < vFmtLength); i++) { pEdidVFmts[i] = pDis->EdidVFmts[i]; } } else { /* No pEdidVFmts to copy so set a zero format to be safe */ pEdidVFmts[0] = HDMITX_VFMT_NULL; } /* Fill Video Flags parameter */ *pVidFlags = ((pDis->EdidCeaFlags & 0x80) | /* Underscan */ ((pDis->EdidCeaFlags & 0x30) << 1) ); /* YUV444, YUV422 */ /* Add info regarding xvYCC support */ *pVidFlags = *pVidFlags | (pDis->EdidCeaXVYCCFlags & 0x03); /* Fill number of SVDs available parameter */ *pVFmtsAvail = pDis->EdidSvdCnt; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetVideoPreferred */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetVideoPreferred ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidDtd_t *pEdidDTD ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidDTD == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ /* Populate the Detailed Timing Descriptor structure pEdidDTD from * EdidDtd in the Device Instance Structure. 
*/ lmemcpy(pEdidDTD, &pDis->EdidDTD, sizeof(tmbslHdmiTxEdidDtd_t)); } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* STATIC FUNCTION */ /*============================================================================*/ /*============================================================================*/ /* requestEdidBlock - reads an entire edid block */ /*============================================================================*/ static tmErrorCode_t requestEdidBlock ( tmHdmiTxobject_t *pDis /* Device instance strucure to use */ ) { tmErrorCode_t err; /* Error code */ UInt8 segptr; /* Segment ptr value */ UInt8 offset; /* Word offset value */ /* Check block number is valid [CEA861C A.2.1] */ RETIF_BADPARAM(pDis->EdidBlockRequested >= 255) err = setHwRegister(pDis, E_REG_P09_DDC_ADDR_RW, DDC_EDID_ADDRESS); RETIF_REG_FAIL(err) /* For even blocks we need an offset of 0, odd blocks we need 128 */ offset = (((UInt8)pDis->EdidBlockRequested & 1) == 1) ? 
128 : 0; err = setHwRegister(pDis, E_REG_P09_DDC_OFFS_RW, offset); RETIF_REG_FAIL(err) err = setHwRegister(pDis, E_REG_P09_DDC_SEGM_ADDR_RW, DDC_SGMT_PTR_ADDRESS); RETIF_REG_FAIL(err) /* Calculate which segment of the EDID we need (2 blocks per segment) */ segptr = (UInt8)pDis->EdidBlockRequested / 2; err = setHwRegister(pDis, E_REG_P09_DDC_SEGM_RW, segptr); RETIF_REG_FAIL(err) /* Enable reading EDID */ err = setHwRegister(pDis, E_REG_P09_EDID_CTRL_RW, 0x1); RETIF_REG_FAIL(err) /* The flag to start the EDID reading must cleared by software*/ err = setHwRegister(pDis, E_REG_P09_EDID_CTRL_RW, 0x0); RETIF_REG_FAIL(err) return err; } /*============================================================================*/ /* parseEdidBlock */ /*============================================================================*/ static tmErrorCode_t parseEdidBlock ( tmHdmiTxobject_t *pDis, /* Device instance strucure holding block */ Int blockNumber /* Block number */ ) { UInt8 i; /* Loop index */ UInt8 blockPtr, endPtr; /* Parsing pointers */ UInt8 blockType, blockLength; Bool dtdFound; UInt8 NbBlkRead, offset3D=0; /* Check block number is valid [CEA861C A.2.1] */ RETIF_BADPARAM(blockNumber >= 255) NbBlkRead = 0; dtdFound = True; blockPtr = 0; if (blockNumber == 0) { pDis->EDIDBasicDisplayParam.uVideoInputDef = pDis->EdidBlock[0x14]; pDis->EDIDBasicDisplayParam.uMaxHorizontalSize = pDis->EdidBlock[0x15]; pDis->EDIDBasicDisplayParam.uMaxVerticalSize = pDis->EdidBlock[0x16]; pDis->EDIDBasicDisplayParam.uGamma = pDis->EdidBlock[0x17]; pDis->EDIDBasicDisplayParam.uFeatureSupport = pDis->EdidBlock[0x18]; /* Block 0 - contains DTDs but no video data block (SVDs) */ for (i = 0; (i < 2) && (dtdFound); i++) /* search 2 possible DTD blocks in block 0 */ { blockPtr = (UInt8)(EDID_BLK0_BASE_DTD + (i * EDID_DTD_BLK_SIZE)); if ((blockPtr + EDID_DTD_BLK_SIZE - 1) < EDID_BLOCK_SIZE) { dtdFound = storeDtdBlock(pDis, blockPtr); if (dtdFound) { NbBlkRead++; } } } dtdFound = True; /* Parse monitor 
descriptor */ for (i = NbBlkRead; (i < EDID_NB_MAX_DESCRIP_BLK_IN_BLK_0) && (dtdFound); i++) { blockPtr = (UInt8)(EDID_BLK0_BASE_DTD + (i * EDID_DTD_BLK_SIZE)); if ((blockPtr + EDID_DTD_BLK_SIZE - 1) < EDID_BLOCK_SIZE) { dtdFound = storeMonitorDescriptor(pDis, blockPtr); } } } else if (blockNumber >= 1) { switch (pDis->EdidBlock[0]) { /* CEA EXTENSION */ case EDID_CEA_EXTENSION: /* Read CEA flag bits here - lockout when read once??? */ pDis->EdidCeaFlags = pDis->EdidBlock[3]; blockPtr = EDID_BLK_EXT_BASE; /* data block start always fixed */ endPtr = pDis->EdidBlock[2]; /* byte after end of data blocks */ if (endPtr >= (EDID_BLK_EXT_BASE + 2) && (endPtr <= EDID_BLOCK_SIZE)) /* Only try reading if data blocks take up 2 bytes or more, since * a video data block must be at least 2 bytes */ { while (blockPtr < endPtr) { blockType = (UInt8)((pDis->EdidBlock[blockPtr] & 0xE0) >> 5); blockLength = (pDis->EdidBlock[blockPtr] & 0x1F); switch((Int)blockType) { case E_CEA_VIDEO_BLOCK: /* We have a video data block */ for (i = 1; i <= blockLength; i++) { if ((blockPtr + i) < (EDID_BLOCK_SIZE)) { /* If space, store non-zero SVDs */ if ((pDis->EdidBlock[blockPtr + i] != 0) && (pDis->EdidSvdCnt < HDMI_TX_SVD_MAX_CNT)) { pDis->EdidVFmts[pDis->EdidSvdCnt] = pDis->EdidBlock[blockPtr + i]; pDis->EdidSvdCnt++; } } else { /* do nothing */ } } break; case E_CEA_AUDIO_BLOCK: /* We have an audio data block */ for (i = 1; (i + 2) <= blockLength; i += 3) { /* Must loop in steps of 3 (SAD size) */ /* If space, store non-zero SADs */ if ((blockPtr) < (EDID_BLOCK_SIZE -(i +2))) { if (((pDis->EdidBlock[blockPtr + i] & 0x78) != 0) && (pDis->EdidSadCnt < HDMI_TX_SAD_MAX_CNT)) { pDis->EdidAFmts[pDis->EdidSadCnt].ModeChans = pDis->EdidBlock[blockPtr + i]; pDis->EdidAFmts[pDis->EdidSadCnt].Freqs = pDis->EdidBlock[blockPtr + i + 1]; pDis->EdidAFmts[pDis->EdidSadCnt].Byte3 = pDis->EdidBlock[blockPtr + i + 2]; pDis->EdidSadCnt++; } } else { /* do nothing */ } } break; case E_CEA_VSDB: /* We have a VSDB 
*/ /* 5 bytes expected, but this is EDID land so double check*/ if (blockLength >= 5) { if ((blockPtr) < (EDID_BLOCK_SIZE - 5)) { if ((pDis->EdidBlock[blockPtr + 1] == 0x03) && (pDis->EdidBlock[blockPtr + 2] == 0x0C) && (pDis->EdidBlock[blockPtr + 3] == 0x00)) { pDis->EdidSinkType = HDMITX_SINK_HDMI; if ((blockPtr) < (EDID_BLOCK_SIZE - 5)) { pDis->EdidSourceAddress = ((UInt16)pDis->EdidBlock[blockPtr + 4] << 8) + pDis->EdidBlock[blockPtr + 5]; } else { /* do nothing */ } } else { pDis->EdidSinkType = HDMITX_SINK_DVI; } } else { /* do nothing */ } } if (blockLength >= 6) /* Space for byte with AI flag */ { /* Mask AI bit */ if ((blockPtr ) < (EDID_BLOCK_SIZE - 6)) { if((pDis->EdidBlock[blockPtr + 6] & 0x80) == 0x80) { pDis->EdidSinkAi = True; } } else { /* do nothing */ } } /* Read Max_TMDS_Clock */ if (blockLength >= 7) pDis->EdidExtraVsdbData.maxTmdsClock = pDis->EdidBlock[blockPtr + 7]; else pDis->EdidExtraVsdbData.maxTmdsClock = 0; /* latency, HDMI Video present and content type fields */ if (blockLength >= 8) { if ((blockPtr) < (EDID_BLOCK_SIZE - 10)) { /* Read CNC0~3 */ pDis->EdidExtraVsdbData.cnc0 = pDis->EdidBlock[blockPtr + 8] & 0x01; /* 1=True, 0=False */ pDis->EdidExtraVsdbData.cnc1 = (pDis->EdidBlock[blockPtr + 8] & 0x02) >> 1; pDis->EdidExtraVsdbData.cnc2 = (pDis->EdidBlock[blockPtr + 8] & 0x04) >> 2; pDis->EdidExtraVsdbData.cnc3 = (pDis->EdidBlock[blockPtr + 8] & 0x08) >> 3; if( (pDis->EdidBlock[blockPtr + 8] & 0xC0) == 0xC0 ) { /* Read video_latency, audio_latency, I_video_latency, I_audio_latency */ if ((blockPtr) < (EDID_BLOCK_SIZE - 12)) { pDis->EdidLatency.Edidvideo_latency = pDis->EdidBlock[blockPtr + 9]; pDis->EdidLatency.Edidaudio_latency = pDis->EdidBlock[blockPtr + 10]; pDis->EdidLatency.EdidIvideo_latency = pDis->EdidBlock[blockPtr + 11]; pDis->EdidLatency.EdidIaudio_latency = pDis->EdidBlock[blockPtr + 12]; pDis->EdidLatency.latency_available = True; pDis->EdidLatency.Ilatency_available = True; offset3D = 13; /* offset to the '3D_present' 
field */ } else { /* do nothing */ } } else if ((pDis->EdidBlock[blockPtr + 8] & 0x80) == 0x80) { /* Read video_latency, audio_latency */ pDis->EdidLatency.Edidvideo_latency = pDis->EdidBlock[blockPtr + 9]; pDis->EdidLatency.Edidaudio_latency = pDis->EdidBlock[blockPtr + 10]; pDis->EdidLatency.latency_available = True; offset3D = 11; } else { pDis->EdidLatency.latency_available = False; pDis->EdidLatency.Ilatency_available = False; offset3D = 9; } /* Read HDMI_Video_present */ pDis->EdidExtraVsdbData.hdmiVideoPresent = (pDis->EdidBlock[blockPtr + 8] & 0x20) >> 5; } else { /* do nothing */ } } else { pDis->EdidLatency.latency_available = False; pDis->EdidLatency.Ilatency_available = False; pDis->EdidExtraVsdbData.hdmiVideoPresent = False; pDis->EdidExtraVsdbData.cnc0 = False; pDis->EdidExtraVsdbData.cnc1 = False; pDis->EdidExtraVsdbData.cnc2 = False; pDis->EdidExtraVsdbData.cnc3 = False; } /* 3D data fields according to HDMI 1.4a standard */ if (pDis->EdidExtraVsdbData.hdmiVideoPresent) { /* read 3D_present */ pDis->EdidExtraVsdbData.h3DPresent = (pDis->EdidBlock[blockPtr + offset3D] & 0x80) >> 7; /* read 3D_Multi_present */ pDis->EdidExtraVsdbData.h3DMultiPresent = (pDis->EdidBlock[blockPtr + offset3D] & 0x60) >> 5; /* read image_Size */ pDis->EdidExtraVsdbData.imageSize = (pDis->EdidBlock[blockPtr + offset3D] & 0x18) >> 3; /* read HDMI_3D_LEN and HDMI_XX_LEN */ offset3D += 1; pDis->EdidExtraVsdbData.hdmi3DLen = pDis->EdidBlock[blockPtr + offset3D] & 0x1F; pDis->EdidExtraVsdbData.hdmiVicLen = (pDis->EdidBlock[blockPtr + offset3D] & 0xE0) >> 5; if((pDis->EdidExtraVsdbData.hdmi3DLen + pDis->EdidExtraVsdbData.hdmiVicLen) > 0) { /* copy the rest of the bytes*/ lmemcpy(pDis->EdidExtraVsdbData.ext3DData, &(pDis->EdidBlock[blockPtr + offset3D + 1]), blockLength-offset3D); } } else { pDis->EdidExtraVsdbData.h3DPresent = False; pDis->EdidExtraVsdbData.h3DMultiPresent = 0; pDis->EdidExtraVsdbData.imageSize = 0; pDis->EdidExtraVsdbData.hdmi3DLen = 0; 
pDis->EdidExtraVsdbData.hdmiVicLen = 0; } break; case E_CEA_EXTENDED: /* Use extended Tag */ /* we need to read the extended tag code */ if ((blockPtr ) < (EDID_BLOCK_SIZE -2)) { switch ( pDis->EdidBlock[blockPtr + 1]) { case EXT_CEA_COLORIMETRY_DB: /* look at xvYCC709 and xvYCC601 support */ pDis->EdidCeaXVYCCFlags = pDis->EdidBlock[blockPtr + 2]; break; } } else { /* do nothing */ } break; /* E_CEA_EXTENDED */ default: break; } blockPtr += (blockLength + 1); /* Point to next block */ } } dtdFound = True; for (i = 0; (i < EDID_NUMBER_MAX_DTD_BLK_1) && (dtdFound); i++) /* search possible DTD blocks in block 1 */ { blockPtr = ((UInt8)pDis->EdidBlock[EDID_BLK1_OFFSET_BASE_DTD]) + ((UInt8)(i * EDID_DTD_BLK_SIZE)); if ((blockPtr + EDID_DTD_BLK_SIZE - 1) < EDID_BLOCK_SIZE) { dtdFound = storeDtdBlock(pDis, blockPtr); } } break; case EDID_BLOCK_MAP: /* BLOCK MAP */ if (pDis->EdidBlockCnt > 1) { if ((pDis->EdidBlockCnt - 1) < EDID_BLOCK_SIZE) { if (pDis->EdidBlock[pDis->EdidBlockCnt - 1] == EDID_CEA_EXTENSION) { /* Some devices have been incorrectly designed so that the block map is not counted in the */ /* extension count. Design of compliant devices should take compatibility with those non-compliant */ /* devices into consideration. 
*/ pDis->EdidBlockCnt = pDis->EdidBlockCnt + 1; } } else { /* do nothing */ } } break; } } return TM_OK; } /*============================================================================*/ /* storeDtdBlock */ /*============================================================================*/ static Bool storeDtdBlock ( tmHdmiTxobject_t *pDis, /* Device instance strucure holding block */ UInt8 blockPtr ) { Bool dtdFound = False; if (blockPtr >= (EDID_BLOCK_SIZE-17)) { /* do nothing */ return dtdFound; } /* First, select blocks that are DTDs [CEA861C A.2.10] */ if (((pDis->EdidBlock[blockPtr+0] != 0) || (pDis->EdidBlock[blockPtr+1] != 0) || (pDis->EdidBlock[blockPtr+2] != 0) || (pDis->EdidBlock[blockPtr+4] != 0)) && (pDis->NbDTDStored < NUMBER_DTD_STORED)) { /* Store the first DTD we find, others will be skipped */ pDis->EdidDTD[pDis->NbDTDStored].uPixelClock = ((UInt16)pDis->EdidBlock[blockPtr+1] << 8) | (UInt16)pDis->EdidBlock[blockPtr+0]; pDis->EdidDTD[pDis->NbDTDStored].uHActivePixels = (((UInt16)pDis->EdidBlock[blockPtr+4] & 0x00F0) << 4) | (UInt16)pDis->EdidBlock[blockPtr+2]; pDis->EdidDTD[pDis->NbDTDStored].uHBlankPixels = (((UInt16)pDis->EdidBlock[blockPtr+4] & 0x000F) << 8) | (UInt16)pDis->EdidBlock[blockPtr+3]; pDis->EdidDTD[pDis->NbDTDStored].uVActiveLines = (((UInt16)pDis->EdidBlock[blockPtr+7] & 0x00F0) << 4) | (UInt16)pDis->EdidBlock[blockPtr+5]; pDis->EdidDTD[pDis->NbDTDStored].uVBlankLines = (((UInt16)pDis->EdidBlock[blockPtr+7] & 0x000F) << 8) | (UInt16)pDis->EdidBlock[blockPtr+6]; pDis->EdidDTD[pDis->NbDTDStored].uHSyncOffset = (((UInt16)pDis->EdidBlock[blockPtr+11] & 0x00C0) << 2) | (UInt16)pDis->EdidBlock[blockPtr+8]; pDis->EdidDTD[pDis->NbDTDStored].uHSyncWidth = (((UInt16)pDis->EdidBlock[blockPtr+11] & 0x0030) << 4) | (UInt16)pDis->EdidBlock[blockPtr+9]; pDis->EdidDTD[pDis->NbDTDStored].uVSyncOffset = (((UInt16)pDis->EdidBlock[blockPtr+11] & 0x000C) << 2) | (((UInt16)pDis->EdidBlock[blockPtr+10] & 0x00F0) >> 4); 
pDis->EdidDTD[pDis->NbDTDStored].uVSyncWidth = (((UInt16)pDis->EdidBlock[blockPtr+11] & 0x0003) << 4) | ((UInt16)pDis->EdidBlock[blockPtr+10] & 0x000F); pDis->EdidDTD[pDis->NbDTDStored].uHImageSize = (((UInt16)pDis->EdidBlock[blockPtr+14] & 0x00F0) << 4) | (UInt16)pDis->EdidBlock[blockPtr+12]; pDis->EdidDTD[pDis->NbDTDStored].uVImageSize = (((UInt16)pDis->EdidBlock[blockPtr+14] & 0x000F) << 8) | (UInt16)pDis->EdidBlock[blockPtr+13]; pDis->EdidDTD[pDis->NbDTDStored].uHBorderPixels = (UInt16)pDis->EdidBlock[blockPtr+15]; pDis->EdidDTD[pDis->NbDTDStored].uVBorderPixels = (UInt16)pDis->EdidBlock[blockPtr+16]; pDis->EdidDTD[pDis->NbDTDStored].Flags = pDis->EdidBlock[blockPtr+17]; pDis->NbDTDStored++; dtdFound = True; /* Stop any more DTDs being parsed */ } return (dtdFound); } /*============================================================================*/ /* storeMonitorBlock */ /*============================================================================*/ static Bool storeMonitorDescriptor ( tmHdmiTxobject_t *pDis, /* Device instance strucure holding block */ UInt8 blockPtr ) { Bool dtdFound = False; if (blockPtr >= (EDID_BLOCK_SIZE-5)) { /* do nothing */ return dtdFound; } /* First, select blocks that are DTDs [CEA861C A.2.10] */ if ((pDis->EdidBlock[blockPtr+0] == 0) && (pDis->EdidBlock[blockPtr+1] == 0) && (pDis->EdidBlock[blockPtr+2] == 0) ) { if (pDis->EdidBlock[blockPtr+3] == EDID_MONITOR_NAME_DESC_DATA_TYPE) { if (pDis->EdidFirstMonitorDescriptor.bDescRecord == False) { pDis->EdidFirstMonitorDescriptor.bDescRecord = True; lmemcpy(&(pDis->EdidFirstMonitorDescriptor.uMonitorName) , &(pDis->EdidBlock[blockPtr+5]), EDID_MONITOR_DESCRIPTOR_SIZE); dtdFound = True; } else if ((pDis->EdidOtherMonitorDescriptor.bDescRecord == False)) { pDis->EdidOtherMonitorDescriptor.bDescRecord = True; lmemcpy(&(pDis->EdidOtherMonitorDescriptor.uOtherDescriptor) , &(pDis->EdidBlock[blockPtr+5]), EDID_MONITOR_DESCRIPTOR_SIZE); dtdFound = True; } } else if (pDis->EdidBlock[blockPtr+3] 
== EDID_MONITOR_RANGE_DESC_DATA_TYPE) { if (pDis->EdidSecondMonitorDescriptor.bDescRecord == False) { if (blockPtr < (EDID_BLOCK_SIZE-9)) { pDis->EdidSecondMonitorDescriptor.bDescRecord = True; pDis->EdidSecondMonitorDescriptor.uMinVerticalRate = pDis->EdidBlock[blockPtr+5]; pDis->EdidSecondMonitorDescriptor.uMaxVerticalRate = pDis->EdidBlock[blockPtr+6]; pDis->EdidSecondMonitorDescriptor.uMinHorizontalRate = pDis->EdidBlock[blockPtr+7]; pDis->EdidSecondMonitorDescriptor.uMaxHorizontalRate = pDis->EdidBlock[blockPtr+8]; pDis->EdidSecondMonitorDescriptor.uMaxSupportedPixelClk = pDis->EdidBlock[blockPtr+9]; dtdFound = True; } else { /* do nothing */ } } } } return (dtdFound); } /*============================================================================*/ /* tmbslTDA9989EdidGetLatencyInfo */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetLatencyInfo ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidLatency_t * pEdidLatency ) { tmHdmiTxobject_t *pDis; /* Pointer to Device Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pEdidLatency == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ *pEdidLatency = pDis->EdidLatency; } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* tmbslTDA9989EdidGetExtraVsdbData */ /*============================================================================*/ tmErrorCode_t tmbslTDA9989EdidGetExtraVsdbData ( tmUnitSelect_t txUnit, tmbslHdmiTxEdidExtraVsdbData_t **pExtraVsdbData ) { tmHdmiTxobject_t *pDis; /* Pointer to Device 
Instance Structure */ tmErrorCode_t err; /* Error code */ /* Check unit parameter and point to TX unit object */ err = checkUnitSetDis(txUnit, &pDis); RETIF(err != TM_OK, err) /* Check remaining parameter(s) */ RETIF_BADPARAM(pExtraVsdbData == Null) if ((pDis->EdidStatus == HDMITX_EDID_READ) || (pDis->EdidStatus == HDMITX_EDID_ERROR_CHK)) { /* allow if edid are read or if there are a chk error on an other block than block 0 */ *pExtraVsdbData = &(pDis->EdidExtraVsdbData); } else { /* Not allowed if EdidStatus value is not valid */ err = TMBSL_ERR_HDMI_RESOURCE_NOT_AVAILABLE; } return err; } /*============================================================================*/ /* END OF FILE */ /*============================================================================*/
gpl-2.0
chris4824/kernel_samsung_jf
drivers/motor/tspdrv.c
38
17976
/* ** ========================================================================= ** File: ** tspdrv.c ** ** Description: ** TouchSense Kernel Module main entry-point. ** ** Portions Copyright (c) 2008-2010 Immersion Corporation. All Rights Reserved. ** ** This file contains Original Code and/or Modifications of Original Code ** as defined in and that are subject to the GNU Public License v2 - ** (the 'License'). You may not use this file except in compliance with the ** License. You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software Foundation, Inc., ** 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or contact ** TouchSenseSales@immersion.com. ** ** The Original Code and all software distributed under the License are ** distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER ** EXPRESS OR IMPLIED, AND IMMERSION HEREBY DISCLAIMS ALL SUCH WARRANTIES, ** INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, FITNESS ** FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. Please see ** the License for the specific language governing rights and limitations ** under the License. 
** ========================================================================= */ #ifndef __KERNEL__ #define __KERNEL__ #endif #include <linux/module.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/fs.h> #include <linux/version.h> #include <linux/miscdevice.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/hrtimer.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/wakelock.h> #include <linux/io.h> #include "../staging/android/timed_output.h" #include "tspdrv.h" #include <linux/vibrator.h> #include "immvibespi.c" #if defined(VIBE_DEBUG) && defined(VIBE_RECORD) #include <tspdrvRecorder.c> #endif #include <mach/msm_xo.h> /* Device name and version information */ /* DO NOT CHANGE - this is auto-generated */ #define VERSION_STR " v3.4.55.7\n" /* account extra space for future extra digits in version number */ #define VERSION_STR_LEN 16 /* initialized in tspdrv_probe */ static char g_szdevice_name[(VIBE_MAX_DEVICE_NAME_LENGTH + VERSION_STR_LEN) * NUM_ACTUATORS]; static size_t g_cchdevice_name; static struct wake_lock vib_wake_lock; /* Flag indicating whether the driver is in use */ static char g_bisplaying; /* Buffer to store data sent to SPI */ #define SPI_BUFFER_SIZE \ (NUM_ACTUATORS * (VIBE_OUTPUT_SAMPLE_SIZE + SPI_HEADER_SIZE)) static int g_bstoprequested; static actuator_samples_buffer g_samples_buffer[NUM_ACTUATORS] = {{0} }; static char g_cwrite_buffer[SPI_BUFFER_SIZE]; #define VIBE_TUNING /* #define VIBE_ENABLE_SYSTEM_TIMER */ /* For QA purposes */ #ifdef QA_TEST #define FORCE_LOG_BUFFER_SIZE 128 #define TIME_INCREMENT 5 static int g_ntime; static int g_nforcelog_index; static int8_t g_nforcelog[FORCE_LOG_BUFFER_SIZE]; #endif #ifdef IMPLEMENT_AS_CHAR_DRIVER static int g_nmajor; #endif /* Needs to be included after the global variables because it uses them */ #ifdef CONFIG_HIGH_RES_TIMERS #include "VibeOSKernelLinuxHRTime.c" #else #include "VibeOSKernelLinuxTime.c" #endif /* 
timed_output */ static void _set_vibetonz_work(struct work_struct *unused); static DECLARE_WORK(vibetonz_work, _set_vibetonz_work); static struct hrtimer timer; static int max_timeout = 10000; static int vibrator_value = -1; static int vibrator_work; #define TEST_MODE_TIME 10000 struct vibrator_platform_data vibrator_drvdata; static int set_vibetonz(int timeout) { int8_t strength; if (!timeout) { if (vibrator_drvdata.vib_model == HAPTIC_PWM) { strength = 0; ImmVibeSPI_ForceOut_SetSamples(0, 8, 1, &strength); } else { /* HAPTIC_MOTOR */ ImmVibeSPI_ForceOut_AmpDisable(0); } } else { DbgOut((KERN_INFO "tspdrv: ENABLE\n")); if (vibrator_drvdata.vib_model == HAPTIC_PWM) { #if defined(CONFIG_MACH_JF_TMO) strength = 79; #else strength = 119; #endif /* 90% duty cycle */ ImmVibeSPI_ForceOut_SetSamples(0, 8, 1, &strength); } else { /* HAPTIC_MOTOR */ ImmVibeSPI_ForceOut_AmpEnable(0); } } vibrator_value = timeout; return 0; } static void _set_vibetonz_work(struct work_struct *unused) { set_vibetonz(vibrator_work); return; } static enum hrtimer_restart vibetonz_timer_func(struct hrtimer *timer) { /* set_vibetonz(0); */ vibrator_work = 0; schedule_work(&vibetonz_work); return HRTIMER_NORESTART; } static int get_time_for_vibetonz(struct timed_output_dev *dev) { int remaining; if (hrtimer_active(&timer)) { ktime_t r = hrtimer_get_remaining(&timer); remaining = ktime_to_ms(r);/*returning time in ms*/ } else { remaining = 0; } if (vibrator_value == -1) remaining = -1; return remaining; } static void enable_vibetonz_from_user(struct timed_output_dev *dev, int value) { printk(KERN_DEBUG "tspdrv: Enable time = %d msec\n", value); hrtimer_cancel(&timer); /* set_vibetonz(value); */ vibrator_work = value; schedule_work(&vibetonz_work); if (value > 0 && (value != TEST_MODE_TIME)) { if (value > max_timeout) value = max_timeout; hrtimer_start(&timer, ktime_set(value / 1000, (value % 1000) * 1000000), HRTIMER_MODE_REL); vibrator_value = 0; } } static struct timed_output_dev timed_output_vt = 
{ .name = "vibrator", .get_time = get_time_for_vibetonz, .enable = enable_vibetonz_from_user, }; static void vibetonz_start(void) { int ret = 0; hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); timer.function = vibetonz_timer_func; ret = timed_output_dev_register(&timed_output_vt); if (ret < 0) DbgOut((KERN_ERR "tspdrv: timed_output_dev_register fail\n")); ret = device_create_file(timed_output_vt.dev, &dev_attr_pwm_value); if (ret < 0) DbgOut((KERN_ERR "tspdrv: device_create_file fail: pwm_value\n")); ret = device_create_file(timed_output_vt.dev, &dev_attr_pwm_max); if (ret < 0) { pr_err("vibrator_init(): create sysfs fail: pwm_max\n"); } ret = device_create_file(timed_output_vt.dev, &dev_attr_pwm_min); if (ret < 0) { pr_err("vibrator_init(): create sysfs fail: pwm_min\n"); } ret = device_create_file(timed_output_vt.dev, &dev_attr_pwm_default); if (ret < 0) { pr_err("vibrator_init(): create sysfs fail: pwm_default\n"); } ret = device_create_file(timed_output_vt.dev, &dev_attr_pwm_threshold); if (ret < 0) { pr_err("vibrator_init(): create sysfs fail: pwm_threshold\n"); } } /* File IO */ static int open(struct inode *inode, struct file *file); static int release(struct inode *inode, struct file *file); static ssize_t read(struct file *file, char *buf, size_t count, loff_t *ppos); static ssize_t write(struct file *file, const char *buf, size_t count, loff_t *ppos); #if HAVE_UNLOCKED_IOCTL static long ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif static const struct file_operations fops = { .owner = THIS_MODULE, .read = read, .write = write, .unlocked_ioctl = ioctl, .open = open, .release = release , .llseek = default_llseek }; #ifndef IMPLEMENT_AS_CHAR_DRIVER static struct miscdevice miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = MODULE_NAME, .fops = &fops }; #endif #ifdef VIBE_ENABLE_SYSTEM_TIMER int vibetonz_clk_on(struct device *dev) { struct clk *vibetonz_clk = NULL; vibetonz_clk = clk_get(dev, "timers"); if (IS_ERR(vibetonz_clk)) 
{ DbgOut((KERN_ERR "tspdrv: failed to get clock for vibetonz\n")); goto err_clk0; } clk_enable(vibetonz_clk); clk_put(vibetonz_clk); return 0; err_clk0: clk_put(vibetonz_clk); return -EINVAL; } int vibetonz_clk_off(struct device *dev) { struct clk *vibetonz_clk = NULL; vibetonz_clk = clk_get(dev, "timers"); if (IS_ERR(vibetonz_clk)) { DbgOut((KERN_ERR "tspdrv: failed to get clock for vibetonz\n")); goto err_clk0; } clk_disable(vibetonz_clk); clk_put(vibetonz_clk); return 0; err_clk0: clk_put(vibetonz_clk); return -EINVAL; } #else int vibetonz_clk_on(struct device *dev) { return -EINVAL; } int vibetonz_clk_off(struct device *dev) { return -EINVAL; } #endif /* VIBE_ENABLE_SYSTEM_TIMER */ static __devinit int tspdrv_probe(struct platform_device *pdev) { struct vibrator_platform_data *pdata; int ret, i; /* initialized below */ DbgOut((KERN_INFO "tspdrv: tspdrv_probe.\n")); /* This condition will be removed,after all board files changes done */ if (pdev->dev.platform_data == NULL) { DbgOut(KERN_ERR "tspdrv: tspdrv probe failed, pdata is NULL"); return -EINVAL; } else { pdata = pdev->dev.platform_data; vibrator_drvdata.vib_model = pdata->vib_model; vibrator_drvdata.is_pmic_haptic_pwr_en = \ pdata->is_pmic_haptic_pwr_en; if (pdata->vib_model == HAPTIC_PWM) { if (pdata->is_pmic_vib_pwm) vibrator_drvdata.vib_pwm_gpio = \ PM8921_GPIO_PM_TO_SYS(pdata->vib_pwm_gpio); else vibrator_drvdata.vib_pwm_gpio = pdata->vib_pwm_gpio; } vibrator_drvdata.power_onoff = pdata->power_onoff; } #ifdef IMPLEMENT_AS_CHAR_DRIVER g_nmajor = register_chrdev(0, MODULE_NAME, &fops); if (g_nmajor < 0) { DbgOut((KERN_ERR "tspdrv: can't get major number.\n")); ret = g_nmajor; return ret; } #else ret = misc_register(&miscdev); if (ret) { DbgOut((KERN_ERR "tspdrv: misc_register failed.\n")); return ret; } #endif DbgRecorderInit(()); vibetonz_clk_on(&pdev->dev); ImmVibeSPI_ForceOut_Initialize(); VibeOSKernelLinuxInitTimer(); /* Get and concatenate device name and initialize data buffer */ g_cchdevice_name 
= 0; for (i = 0; i < NUM_ACTUATORS; i++) { char *szName = g_szdevice_name + g_cchdevice_name; ImmVibeSPI_Device_GetName(i, szName, VIBE_MAX_DEVICE_NAME_LENGTH); /* Append version information and get buffer length */ strlcat(szName, VERSION_STR, sizeof(VERSION_STR)); g_cchdevice_name += strnlen(szName, (VIBE_MAX_DEVICE_NAME_LENGTH+VERSION_STR_LEN)*NUM_ACTUATORS); g_samples_buffer[i].nindex_playing_buffer = -1;/* Not playing */ g_samples_buffer[i].actuator_samples[0].nbuffer_size = 0; g_samples_buffer[i].actuator_samples[1].nbuffer_size = 0; } wake_lock_init(&vib_wake_lock, WAKE_LOCK_SUSPEND, "vib_present"); vibetonz_start(); return 0; } static int __devexit tspdrv_remove(struct platform_device *pdev) { DbgOut((KERN_INFO "tspdrv: tspdrv_remove.\n")); DbgRecorderTerminate(()); VibeOSKernelLinuxTerminateTimer(); ImmVibeSPI_ForceOut_Terminate(); wake_lock_destroy(&vib_wake_lock); return 0; } static int open(struct inode *inode, struct file *file) { DbgOut((KERN_INFO "tspdrv: open.\n")); if (!try_module_get(THIS_MODULE)) return -ENODEV; return 0; } static int release(struct inode *inode, struct file *file) { DbgOut((KERN_INFO "tspdrv: release.\n")); /* ** Reset force and stop timer when the driver is closed, to make sure ** no dangling semaphore remains in the system, especially when the ** driver is run outside of immvibed for testing purposes. */ VibeOSKernelLinuxStopTimer(); /* ** Clear the variable used to store the magic number to prevent ** unauthorized caller to write data. TouchSense service is the only ** valid caller. */ file->private_data = (void *)NULL; module_put(THIS_MODULE); return 0; } static ssize_t read(struct file *file, char *buf, size_t count, loff_t *ppos) { const size_t nbufsize = (g_cchdevice_name > (size_t)(*ppos)) ? 
min(count, g_cchdevice_name - (size_t)(*ppos)) : 0; /* End of buffer, exit */ if (0 == nbufsize) return 0; if (0 != copy_to_user(buf, g_szdevice_name + (*ppos), nbufsize)) { /* Failed to copy all the data, exit */ DbgOut((KERN_ERR "tspdrv: copy_to_user failed.\n")); return 0; } /* Update file position and return copied buffer size */ *ppos += nbufsize; return nbufsize; } static ssize_t write(struct file *file, const char *buf, size_t count, loff_t *ppos) { int i = 0; *ppos = 0; /* file position not used, always set to 0 */ /* DbgOut((KERN_ERR "tspdrv: write....\n")); */ /* ** Prevent unauthorized caller to write data. ** TouchSense service is the only valid caller. */ if (file->private_data != (void *)TSPDRV_MAGIC_NUMBER) { DbgOut((KERN_ERR "tspdrv: unauthorized write.\n")); return 0; } /* Check buffer size */ if ((count <= SPI_HEADER_SIZE) || (count > SPI_BUFFER_SIZE)) { DbgOut((KERN_ERR "tspdrv: invalid write buffer size.\n")); return 0; } /* Copy immediately the input buffer */ if (0 != copy_from_user(g_cwrite_buffer, buf, count)) { /* Failed to copy all the data, exit */ DbgOut((KERN_ERR "tspdrv: copy_from_user failed.\n")); return 0; } while (i < count) { int nindex_free_buffer; /* initialized below */ samples_buffer *pinput_buffer = (samples_buffer *)(&g_cwrite_buffer[i]); if ((i + SPI_HEADER_SIZE) >= count) { /* ** Index is about to go beyond the buffer size. ** (Should never happen). */ DbgOut((KERN_EMERG "tspdrv: invalid buffer index.\n")); } /* Check bit depth */ if (8 != pinput_buffer->nbit_depth) DbgOut((KERN_WARNING "tspdrv: invalid bit depth.Use default value(8).\n")); /* The above code not valid if SPI header size is not 3 */ #if (SPI_HEADER_SIZE != 3) #error "SPI_HEADER_SIZE expected to be 3" #endif /* Check buffer size */ if ((i + SPI_HEADER_SIZE + pinput_buffer->nbuffer_size) > count) { /* ** Index is about to go beyond the buffer size. ** (Should never happen). 
*/ DbgOut((KERN_EMERG "tspdrv: invalid data size.\n")); } /* Check actuator index */ if (NUM_ACTUATORS <= pinput_buffer->nactuator_index) { DbgOut((KERN_ERR "tspdrv: invalid actuator index.\n")); i += (SPI_HEADER_SIZE + pinput_buffer->nbuffer_size); continue; } if (0 == g_samples_buffer[pinput_buffer->nactuator_index] .actuator_samples[0].nbuffer_size) { nindex_free_buffer = 0; } else if (0 == g_samples_buffer[pinput_buffer->nactuator_index] .actuator_samples[1].nbuffer_size) { nindex_free_buffer = 1; } else { /* No room to store new samples */ DbgOut((KERN_ERR "tspdrv: no room to store new samples.\n")); return 0; } /* Store the data in the free buffer of the given actuator */ memcpy( &(g_samples_buffer[pinput_buffer->nactuator_index] .actuator_samples[nindex_free_buffer]), &g_cwrite_buffer[i], (SPI_HEADER_SIZE + pinput_buffer->nbuffer_size)); /* If the no buffer is playing, prepare to play ** g_samples_buffer[pinput_buffer->nactuator_index]. ** actuator_samples[nindex_free_buffer] */ if (-1 == g_samples_buffer[pinput_buffer->nactuator_index] .nindex_playing_buffer) { g_samples_buffer[pinput_buffer->nactuator_index] .nindex_playing_buffer = nindex_free_buffer; g_samples_buffer[pinput_buffer->nactuator_index] .nindex_output_value = 0; } /* Increment buffer index */ i += (SPI_HEADER_SIZE + pinput_buffer->nbuffer_size); } #ifdef QA_TEST g_nforcelog[g_nforcelog_index++] = g_cSPIBuffer[0]; if (g_nforcelog_index >= FORCE_LOG_BUFFER_SIZE) { for (i = 0; i < FORCE_LOG_BUFFER_SIZE; i++) { printk(KERN_INFO "%d\t%d\n", g_ntime, g_nforcelog[i]); g_ntime += TIME_INCREMENT; } g_nforcelog_index = 0; } #endif /* Start the timer after receiving new output force */ g_bisplaying = true; VibeOSKernelLinuxStartTimer(); return count; } static long ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { #ifdef QA_TEST int i; #endif /* DbgOut(KERN_INFO "tspdrv: ioctl cmd[0x%x].\n", cmd); */ switch (cmd) { case TSPDRV_STOP_KERNEL_TIMER: /* ** As we send one sample ahead of time, we 
need to finish ** playing the last sample before stopping the timer. ** So we just set a flag here. */ if (true == g_bisplaying) g_bstoprequested = true; #ifdef VIBEOSKERNELPROCESSDATA /* Last data processing to disable amp and stop timer */ VibeOSKernelProcessData(NULL); #endif #ifdef QA_TEST if (g_nforcelog_index) { for (i = 0; i < g_nforcelog_index; i++) { printk(KERN_INFO "%d\t%d\n" , g_ntime, g_nforcelog[i]); g_ntime += TIME_INCREMENT; } } g_ntime = 0; g_nforcelog_index = 0; #endif break; case TSPDRV_MAGIC_NUMBER: case TSPDRV_SET_MAGIC_NUMBER: filp->private_data = (void *)TSPDRV_MAGIC_NUMBER; break; case TSPDRV_ENABLE_AMP: wake_lock(&vib_wake_lock); ImmVibeSPI_ForceOut_AmpEnable(arg); DbgRecorderReset((arg)); DbgRecord((arg, ";------- TSPDRV_ENABLE_AMP ---------\n")); break; case TSPDRV_DISABLE_AMP: /* ** Small fix for now to handle proper combination of ** TSPDRV_STOP_KERNEL_TIMER and TSPDRV_DISABLE_AMP together ** If a stop was requested, ignore the request as the amp ** will be disabled by the timer proc when it's ready */ if (!g_bstoprequested) ImmVibeSPI_ForceOut_AmpDisable(arg); wake_unlock(&vib_wake_lock); break; case TSPDRV_GET_NUM_ACTUATORS: return NUM_ACTUATORS; } return 0; } static int suspend(struct platform_device *pdev, pm_message_t state) { int ret; if (g_bisplaying) { ret = -EBUSY; } else { /* Disable system timers */ vibetonz_clk_off(&pdev->dev); ret = 0; } DbgOut(KERN_DEBUG "tspdrv: %s (%d).\n", __func__, ret); return ret; } static int resume(struct platform_device *pdev) { /* Restart system timers */ DbgOut(KERN_DEBUG "tspdrv: %s.\n", __func__); return 0; } static struct platform_driver tspdrv_driver = { .probe = tspdrv_probe, .remove = __devexit_p(tspdrv_remove), .suspend = suspend, .resume = resume, .driver = { .name = MODULE_NAME, .owner = THIS_MODULE, }, }; static int __init tspdrv_init(void) { return platform_driver_register(&tspdrv_driver); } static void __exit tspdrv_exit(void) { platform_driver_unregister(&tspdrv_driver); } 
/* late_initcall: register after most other subsystems (e.g. the SPI/PWM
 * layers this driver depends on) have initialized. */
late_initcall(tspdrv_init);
module_exit(tspdrv_exit);

/* Module info */
MODULE_AUTHOR("Immersion Corporation");
MODULE_DESCRIPTION("TouchSense Kernel Module");
MODULE_LICENSE("GPL v2");
gpl-2.0
Minia89/one_plus_one
fs/cifs/transport.c
294
24661
/* * fs/cifs/transport.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * Jeremy Allison (jra@samba.org) 2006. * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/list.h> #include <linux/gfp.h> #include <linux/wait.h> #include <linux/net.h> #include <linux/delay.h> #include <linux/freezer.h> #include <asm/uaccess.h> #include <asm/processor.h> #include <linux/mempool.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" extern mempool_t *cifs_mid_poolp; static void wake_up_task(struct mid_q_entry *mid) { wake_up_process(mid->callback_data); } struct mid_q_entry * AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; if (server == NULL) { cERROR(1, "Null TCP session in AllocMidQEntry"); return NULL; } temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); if (temp == NULL) return temp; else { memset(temp, 0, sizeof(struct mid_q_entry)); temp->mid = smb_buffer->Mid; /* always LE */ temp->pid = current->pid; temp->command = cpu_to_le16(smb_buffer->Command); cFYI(1, "For smb_command %d", smb_buffer->Command); /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ /* when mid allocated can be before when sent 
*/ temp->when_alloc = jiffies; /* * The default is for the mid to be synchronous, so the * default callback just wakes up the current task. */ temp->callback = wake_up_task; temp->callback_data = current; } atomic_inc(&midCount); temp->mid_state = MID_REQUEST_ALLOCATED; return temp; } void DeleteMidQEntry(struct mid_q_entry *midEntry) { #ifdef CONFIG_CIFS_STATS2 unsigned long now; #endif midEntry->mid_state = MID_FREE; atomic_dec(&midCount); if (midEntry->large_buf) cifs_buf_release(midEntry->resp_buf); else cifs_small_buf_release(midEntry->resp_buf); #ifdef CONFIG_CIFS_STATS2 now = jiffies; /* commands taking longer than one second are indications that something is wrong, unless it is quite a slow link or server */ if ((now - midEntry->when_alloc) > HZ) { if ((cifsFYI & CIFS_TIMER) && (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) { printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu", midEntry->command, midEntry->mid); printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n", now - midEntry->when_alloc, now - midEntry->when_sent, now - midEntry->when_received); } } #endif mempool_free(midEntry, cifs_mid_poolp); } static void delete_mid(struct mid_q_entry *mid) { spin_lock(&GlobalMid_Lock); list_del(&mid->qhead); spin_unlock(&GlobalMid_Lock); DeleteMidQEntry(mid); } static int smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) { int rc = 0; int i = 0; struct msghdr smb_msg; __be32 *buf_len = (__be32 *)(iov[0].iov_base); unsigned int len = iov[0].iov_len; unsigned int total_len; int first_vec = 0; unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); struct socket *ssocket = server->ssocket; if (ssocket == NULL) return -ENOTSOCK; /* BB eventually add reconnect code here */ smb_msg.msg_name = (struct sockaddr *) &server->dstaddr; smb_msg.msg_namelen = sizeof(struct sockaddr); smb_msg.msg_control = NULL; smb_msg.msg_controllen = 0; if (server->noblocksnd) smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else smb_msg.msg_flags = MSG_NOSIGNAL; 
total_len = 0; for (i = 0; i < n_vec; i++) total_len += iov[i].iov_len; cFYI(1, "Sending smb: total_len %d", total_len); dump_smb(iov[0].iov_base, len); i = 0; while (total_len) { rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec], n_vec - first_vec, total_len); if ((rc == -ENOSPC) || (rc == -EAGAIN)) { i++; /* * If blocking send we try 3 times, since each can block * for 5 seconds. For nonblocking we have to try more * but wait increasing amounts of time allowing time for * socket to clear. The overall time we wait in either * case to send on the socket is about 15 seconds. * Similarly we wait for 15 seconds for a response from * the server in SendReceive[2] for the server to send * a response back for most types of requests (except * SMB Write past end of file which can be slow, and * blocking lock operations). NFS waits slightly longer * than CIFS, but this can make it take longer for * nonresponsive servers to be detected and 15 seconds * is more than enough time for modern networks to * send a packet. In most cases if we fail to send * after the retries we will kill the socket and * reconnect which may clear the network problem. 
*/ if ((i >= 14) || (!server->noblocksnd && (i > 2))) { cERROR(1, "sends on sock %p stuck for 15 seconds", ssocket); rc = -EAGAIN; break; } msleep(1 << i); continue; } if (rc < 0) break; if (rc == total_len) { total_len = 0; break; } else if (rc > total_len) { cERROR(1, "sent %d requested %d", rc, total_len); break; } if (rc == 0) { /* should never happen, letting socket clear before retrying is our only obvious option here */ cERROR(1, "tcp sent no data"); msleep(500); continue; } total_len -= rc; /* the line below resets i */ for (i = first_vec; i < n_vec; i++) { if (iov[i].iov_len) { if (rc > iov[i].iov_len) { rc -= iov[i].iov_len; iov[i].iov_len = 0; } else { iov[i].iov_base += rc; iov[i].iov_len -= rc; first_vec = i; break; } } } i = 0; /* in case we get ENOSPC on the next send */ } if ((total_len > 0) && (total_len != smb_buf_length + 4)) { cFYI(1, "partial send (%d remaining), terminating session", total_len); /* If we have only sent part of an SMB then the next SMB could be taken as the remainder of this one. We need to kill the socket so the server throws away the partial SMB */ server->tcpStatus = CifsNeedReconnect; } if (rc < 0 && rc != -EINTR) cERROR(1, "Error %d sending data on socket to server", rc); else rc = 0; /* Don't want to modify the buffer as a side effect of this call. 
*/ *buf_len = cpu_to_be32(smb_buf_length); return rc; } int smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, unsigned int smb_buf_length) { struct kvec iov; iov.iov_base = smb_buffer; iov.iov_len = smb_buf_length + 4; return smb_sendv(server, &iov, 1); } static int wait_for_free_credits(struct TCP_Server_Info *server, const int optype, int *credits) { int rc; spin_lock(&server->req_lock); if (optype == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ server->in_flight++; *credits -= 1; spin_unlock(&server->req_lock); return 0; } while (1) { if (*credits <= 0) { spin_unlock(&server->req_lock); cifs_num_waiters_inc(server); rc = wait_event_killable(server->request_q, has_credits(server, credits)); cifs_num_waiters_dec(server); if (rc) return rc; spin_lock(&server->req_lock); } else { if (server->tcpStatus == CifsExiting) { spin_unlock(&server->req_lock); return -ENOENT; } /* * Can not count locking commands against total * as they are allowed to block on server. 
*/ /* update # of requests on the wire to server */ if (optype != CIFS_BLOCKING_OP) { *credits -= 1; server->in_flight++; } spin_unlock(&server->req_lock); break; } } return 0; } static int wait_for_free_request(struct TCP_Server_Info *server, const int optype) { return wait_for_free_credits(server, optype, get_credits_field(server)); } static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, struct mid_q_entry **ppmidQ) { if (ses->server->tcpStatus == CifsExiting) { return -ENOENT; } if (ses->server->tcpStatus == CifsNeedReconnect) { cFYI(1, "tcp session dead - return to caller to retry"); return -EAGAIN; } if (ses->status != CifsGood) { /* check if SMB session is bad because we are setting it up */ if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) return -EAGAIN; /* else ok - we are setting up session */ } *ppmidQ = AllocMidQEntry(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; spin_lock(&GlobalMid_Lock); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); spin_unlock(&GlobalMid_Lock); return 0; } static int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) { int error; error = wait_event_freezekillable_unsafe(server->response_q, midQ->mid_state != MID_REQUEST_SUBMITTED); if (error < 0) return -ERESTARTSYS; return 0; } static int cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov, unsigned int nvec, struct mid_q_entry **ret_mid) { int rc; struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; struct mid_q_entry *mid; /* enable signing if server requires it */ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; mid = AllocMidQEntry(hdr, server); if (mid == NULL) return -ENOMEM; /* put it on the pending_mid_q */ spin_lock(&GlobalMid_Lock); list_add_tail(&mid->qhead, &server->pending_mid_q); spin_unlock(&GlobalMid_Lock); rc = cifs_sign_smb2(iov, nvec, server, 
                            &mid->sequence_number);
        if (rc)
                delete_mid(mid);
        /*
         * NOTE(review): *ret_mid is stored even when signing failed and
         * delete_mid() was called above — callers must only use it when
         * rc == 0.
         */
        *ret_mid = mid;
        return rc;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
                unsigned int nvec, mid_receive_t *receive,
                mid_callback_t *callback, void *cbdata, bool ignore_pend)
{
        int rc;
        struct mid_q_entry *mid;

        rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
        if (rc)
                return rc;

        /* sign and send under srv_mutex so sequence numbers stay ordered */
        mutex_lock(&server->srv_mutex);
        rc = cifs_setup_async_request(server, iov, nvec, &mid);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* return the reserved credit and wake any waiters */
                cifs_add_credits(server, 1);
                wake_up(&server->request_q);
                return rc;
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_sendv(server, iov, nvec);
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
        mutex_unlock(&server->srv_mutex);

        if (rc)
                goto out_err;

        return rc;
out_err:
        delete_mid(mid);
        cifs_add_credits(server, 1);
        wake_up(&server->request_q);
        return rc;
}

/*
 *
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 *
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
        cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

        return rc;
}

/*
 * Translate the final mid state into an errno and release the mid.
 * A mid in an unexpected state is unlinked from the pending queue here
 * before being freed.
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
             le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
                       mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);
        DeleteMidQEntry(mid);
        return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
               struct mid_q_entry *mid)
{
        int rc = 0;

        /* -4 for RFC1001 length and +2 for BCC field */
        in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
        in_buf->Command = SMB_COM_NT_CANCEL;
        in_buf->WordCount = 0;
        put_bcc(0, in_buf);

        mutex_lock(&server->srv_mutex);
        rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        /*
         * The response to this call was already factored into the sequence
         * number when the call went out, so we must adjust it back downward
         * after signing here.
         */
        --server->sequence_number;
        rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        mutex_unlock(&server->srv_mutex);

        cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
             in_buf->Mid, rc);

        return rc;
}

/*
 * Verify the signature on a received SMB (when signing is in effect) and
 * map its status code to a POSIX error.  Returns 0 or a negative errno.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* convert the length into a more usable form */
        if (server->sec_mode &
            (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                struct kvec iov;

                iov.iov_base = mid->resp_buf;
                iov.iov_len = len;
                /* FIXME: add code to kill session */
                if (cifs_verify_signature(&iov, 1, server,
                                          mid->sequence_number + 1) != 0)
                        cERROR(1, "Unexpected SMB signature");
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

/* Allocate, queue and sign a mid for a synchronous multi-vector request. */
static int
cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
                   unsigned int nvec, struct mid_q_entry **ret_mid)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
        struct mid_q_entry *mid;

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return rc;
        rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
        if (rc)
                delete_mid(mid);
        *ret_mid = mid;
        return rc;
}

/*
 * Send a small-buffer request built in iov and wait for the response.
 * On success *pRespBufType reports which buffer pool the response came
 * from; unless CIFS_NO_RESP was requested, the caller owns the response
 * buffer.  The request buffer is always released here.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
             const int flags)
{
        int rc = 0;
        int long_op;
        struct mid_q_entry *midQ;
        char *buf = iov[0].iov_base;

        long_op = flags & CIFS_TIMEOUT_MASK;

        *pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_small_buf_release(buf);
                cERROR(1, "Null session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting) {
                cifs_small_buf_release(buf);
                return -ENOENT;
        }

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        rc = wait_for_free_request(ses->server, long_op);
        if (rc) {
                cifs_small_buf_release(buf);
                return rc;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = cifs_setup_request(ses, iov, n_vec, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                cifs_small_buf_release(buf);
                /* Update # of requests on wire to server */
                cifs_add_credits(ses->server, 1);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_sendv(ses->server, iov, n_vec);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_small_buf_release(buf);
                goto out;
        }

        if (long_op == CIFS_ASYNC_OP) {
                /* caller does not wait for a response */
                cifs_small_buf_release(buf);
                goto out;
        }

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                /* interrupted: try to cancel the request on the server */
                send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* demultiplex thread will free the mid later */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        cifs_small_buf_release(buf);
                        cifs_add_credits(ses->server, 1);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        cifs_small_buf_release(buf);

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                cifs_add_credits(ses->server, 1);
                return rc;
        }

        if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cFYI(1, "Bad MID state?");
                goto out;
        }

        buf = (char *)midQ->resp_buf;
        iov[0].iov_base = buf;
        iov[0].iov_len = get_rfc1002_length(buf) + 4;
        if (midQ->large_buf)
                *pRespBufType = CIFS_LARGE_BUFFER;
        else
                *pRespBufType = CIFS_SMALL_BUFFER;

        rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

        /* mark it so buf will not be freed by delete_mid */
        if ((flags & CIFS_NO_RESP) == 0)
                midQ->resp_buf = NULL;
out:
        delete_mid(midQ);
        cifs_add_credits(ses->server, 1);

        return rc;
}

/*
 * Synchronous send/receive of a fully-built SMB in in_buf.  The
 * response is copied into out_buf and its RFC1002 length is returned
 * via *pbytes_returned.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int long_op)
{
        int rc = 0;
        struct mid_q_entry *midQ;

        if (ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                           be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, long_op);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                cifs_add_credits(ses->server, 1);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if (long_op == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_nt_cancel(ses->server, in_buf, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        cifs_add_credits(ses->server, 1);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                cifs_add_credits(ses->server, 1);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        cifs_add_credits(ses->server, 1);

        return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /* We just modify the current in_buf to change
           the type of lock from LOCKING_ANDX_SHARED_LOCK
           or LOCKING_ANDX_EXCLUSIVE_LOCK to
           LOCKING_ANDX_CANCEL_LOCK. */

        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = GetNextMid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                           &bytes_returned, 0);
}

/*
 * Like SendReceive() but for blocking lock requests: the wait for the
 * response is interruptible, and on a signal the outstanding lock is
 * cancelled (NT_CANCEL for POSIX/trans2 locks, LOCKINGX_CANCEL_LOCK for
 * Windows locks) so the system call can be restarted.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
                        int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;

        if (tcon == NULL || tcon->ses == NULL) {
                cERROR(1, "Null smb session");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cERROR(1, "Null tcp session");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server.
           We may make this configurable later or use ses->maxReq */

        if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
                        MAX_CIFS_HDR_SIZE - 4) {
                cERROR(1, "Illegal length, greater than maximum frame, %d",
                           be32_to_cpu(in_buf->smb_buf_length));
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
        if (rc)
                return rc;

        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
                (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
                ((ses->server->tcpStatus == CifsGood) ||
                 (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /* POSIX lock. We send a NT_CANCEL SMB to cause the
                           blocking lock to return. */
                        rc = send_nt_cancel(ses->server, in_buf, midQ);
                        if (rc) {
                                delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
                           to cause the blocking lock to return. */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /* If we get -ENOLCK back the lock may have
                           already been removed. Don't exit in this case. */
                        if (rc && rc != -ENOLCK) {
                                delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_nt_cancel(ses->server, in_buf, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        delete_mid(midQ);
        /* a cancelled-then-completed lock restarts the syscall */
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}
gpl-2.0
djvoleur/V_920P_AOE2
drivers/net/wireless/ipsecdrvtl/au.c
294
11649
/* 'src_compress_deflate_crc32.c' Obfuscated by COBF (Version 1.06 2006-01-07 by BB) at Mon Dec 22 18:00:49 2014 */ #include"cobf.h" #ifdef _WIN32 #if defined( UNDER_CE) && defined( bb334) || ! defined( bb341) #define bb357 1 #define bb356 1 #else #define bb335 bb350 #define bb358 1 #define bb348 1 #endif #define bb360 1 #include"uncobf.h" #include<ndis.h> #include"cobf.h" #ifdef UNDER_CE #include"uncobf.h" #include<ndiswan.h> #include"cobf.h" #endif #include"uncobf.h" #include<stdio.h> #include<basetsd.h> #include"cobf.h" bba bbt bbl bbf, *bb3;bba bbt bbe bbo, *bb80;bba bb137 bb125, *bb351; bba bbt bbl bb41, *bb73;bba bbt bb137 bbk, *bb59;bba bbe bbu, *bb134; bba bbh bbf*bb79; #ifdef bb307 bba bbd bb61, *bb124; #endif #else #include"uncobf.h" #include<linux/module.h> #include<linux/ctype.h> #include<linux/time.h> #include<linux/slab.h> #include"cobf.h" #ifndef bb118 #define bb118 #ifdef _WIN32 #include"uncobf.h" #include<wtypes.h> #include"cobf.h" #else #ifdef bb121 #include"uncobf.h" #include<linux/types.h> #include"cobf.h" #else #include"uncobf.h" #include<stddef.h> #include<sys/types.h> #include"cobf.h" #endif #endif #ifdef _WIN32 #ifdef _MSC_VER bba bb117 bb224; #endif #else bba bbe bbu, *bb134, *bb216; #define bb200 1 #define bb202 0 bba bb261 bb249, *bb205, *bb252;bba bbe bb278, *bb255, *bb227;bba bbt bbo, *bb80, *bb215;bba bb8 bb266, *bb221;bba bbt bb8 bb226, *bb230; bba bb8 bb119, *bb212;bba bbt bb8 bb63, *bb237;bba bb63 bb228, *bb251 ;bba bb63 bb259, *bb220;bba bb119 bb117, *bb217;bba bb244 bb289;bba bb210 bb125;bba bb262 bb85;bba bb112 bb116;bba bb112 bb235; #ifdef bb234 bba bb233 bb41, *bb73;bba bb287 bbk, *bb59;bba bb209 bbd, *bb31;bba bb222 bb57, *bb120; #else bba bb231 bb41, *bb73;bba bb253 bbk, *bb59;bba bb245 bbd, *bb31;bba bb229 bb57, *bb120; #endif bba bb41 bbf, *bb3, *bb263;bba bbk bb206, *bb225, *bb286;bba bbk bb282 , *bb246, *bb284;bba bbd bb61, *bb124, *bb269;bba bb85 bb38, *bb241, * bb223;bba bbd bb239, *bb265, *bb243;bba bb116 bb272, 
*bb213, *bb281; bba bb57 bb270, *bb240, *bb208; #define bb143 bbb bba bbb*bb247, *bb81;bba bbh bbb*bb271;bba bbl bb218;bba bbl*bb207; bba bbh bbl*bb62; #if defined( bb121) bba bbe bb115; #endif bba bb115 bb19;bba bb19*bb273;bba bbh bb19*bb186; #if defined( bb268) || defined( bb248) bba bb19 bb37;bba bb19 bb111; #else bba bbl bb37;bba bbt bbl bb111; #endif bba bbh bb37*bb279;bba bb37*bb277;bba bb61 bb211, *bb219;bba bbb* bb107;bba bb107*bb257; #define bb250( bb36) bbj bb36##__ { bbe bb267; }; bba bbj bb36##__ * \ bb36 bba bbj{bb38 bb190,bb260,bb214,bb285;}bb232, *bb238, *bb283;bba bbj{ bb38 bb10,bb177;}bb254, *bb280, *bb242;bba bbj{bb38 bb264,bb275;} bb274, *bb288, *bb276; #endif bba bbh bbf*bb79; #endif bba bbf bb103; #define IN #define OUT #ifdef _DEBUG #define bb147( bbc) bb27( bbc) #else #define bb147( bbc) ( bbb)( bbc) #endif bba bbe bb160, *bb172; #define bb293 0 #define bb313 1 #define bb297 2 #define bb325 3 #define bb354 4 bba bbe bb361;bba bbb*bb123; #endif #ifdef _WIN32 #ifndef UNDER_CE #define bb32 bb346 #define bb43 bb347 bba bbt bb8 bb32;bba bb8 bb43; #endif #else #endif #ifdef _WIN32 bbb*bb127(bb32 bb48);bbb bb108(bbb* );bbb*bb138(bb32 bb158,bb32 bb48); #else #define bb127( bbc) bb146(1, bbc, bb141) #define bb108( bbc) bb340( bbc) #define bb138( bbc, bbp) bb146( bbc, bbp, bb141) #endif #ifdef _WIN32 #define bb27( bbc) bb339( bbc) #else #ifdef _DEBUG bbe bb145(bbh bbl*bb98,bbh bbl*bb26,bbt bb258); #define bb27( bbc) ( bbb)(( bbc) || ( bb145(# bbc, __FILE__, __LINE__ \ ))) #else #define bb27( bbc) (( bbb)0) #endif #endif bb43 bb301(bb43*bb319); #ifndef _WIN32 bbe bb331(bbh bbl*bbg);bbe bb320(bbh bbl*bb20,...); #endif #ifdef _WIN32 bba bb355 bb95; #define bb142( bbc) bb353( bbc) #define bb144( bbc) bb336( bbc) #define bb135( bbc) bb359( bbc) #define bb133( bbc) bb342( bbc) #else bba bb343 bb95; #define bb142( bbc) ( bbb)( * bbc = bb337( bbc)) #define bb144( bbc) (( bbb)0) #define bb135( bbc) bb352( bbc) #define bb133( bbc) bb344( bbc) #endif #if ( 
defined( _WIN32) || defined( __WIN32__)) && ! defined( WIN32) #define WIN32 #endif #if defined( __GNUC__) || defined( WIN32) || defined( bb1257) || \ defined( bb1247) #ifndef bb407 #define bb407 #endif #endif #if defined( __MSDOS__) && ! defined( bb169) #define bb169 #endif #if defined( bb169) && ! defined( bb407) #define bb533 #endif #ifdef bb169 #define bb1076 #endif #if ( defined( bb169) || defined( bb1238) || defined( WIN32)) && ! \ defined( bb139) #define bb139 #endif #if defined( __STDC__) || defined( __cplusplus) || defined( bb1268) #ifndef bb139 #define bb139 #endif #endif #ifndef bb139 #ifndef bbh #define bbh #endif #endif #if defined( __BORLANDC__) && ( __BORLANDC__ < 0x500) #define bb1148 #endif #ifndef bb292 #ifdef bb533 #define bb292 8 #else #define bb292 9 #endif #endif #ifndef bbq #ifdef bb139 #define bbq( bb421) bb421 #else #define bbq( bb421) () #endif #endif bba bbf bb154;bba bbt bbe bb9;bba bbt bb8 bb25;bba bb154 bb34;bba bbl bb447;bba bbe bb1171;bba bb9 bb165;bba bb25 bb167; #ifdef bb139 bba bbb*bb72;bba bbb*bb191; #else bba bb154*bb72;bba bb154*bb191; #endif #ifdef __cplusplus bbr"\x43"{ #endif bba bb72( *bb526)bbq((bb72 bb122,bb9 bb510,bb9 bb48));bba bbb( *bb524 )bbq((bb72 bb122,bb72 bb1138));bbj bb390;bba bbj bb1221{bb34*bb128; bb9 bb149;bb25 bb193;bb34*bb619;bb9 bb397;bb25 bb642;bbl*bb327;bbj bb390*bb23;bb526 bb414;bb524 bb379;bb72 bb122;bbe bb1000;bb25 bb377; bb25 bb1189;}bb451;bba bb451*bb16;bbr bbh bbl*bb1196 bbq((bbb));bbr bbe bb529 bbq((bb16 bb15,bbe bb176));bbr bbe bb971 bbq((bb16 bb15)); bbr bbe bb1086 bbq((bb16 bb15,bbe bb176));bbr bbe bb959 bbq((bb16 bb15 ));bbr bbe bb1217 bbq((bb16 bb15,bbh bb34*bb441,bb9 bb449));bbr bbe bb1188 bbq((bb16 bb132,bb16 bb185));bbr bbe bb1089 bbq((bb16 bb15)); bbr bbe bb1215 bbq((bb16 bb15,bbe bb126,bbe bb303));bbr bbe bb1218 bbq ((bb16 bb15,bbh bb34*bb441,bb9 bb449));bbr bbe bb1197 bbq((bb16 bb15)); bbr bbe bb1045 bbq((bb16 bb15));bbr bbe bb1187 bbq((bb34*bb132,bb167* bb321,bbh bb34*bb185,bb25 
bb332));bbr bbe bb1181 bbq((bb34*bb132, bb167*bb321,bbh bb34*bb185,bb25 bb332,bbe bb126));bbr bbe bb1202 bbq( (bb34*bb132,bb167*bb321,bbh bb34*bb185,bb25 bb332));bba bb191 bb39; bbr bb39 bb1237 bbq((bbh bbl*bb1039,bbh bbl*bb45));bbr bb39 bb1239 bbq ((bbe bb486,bbh bbl*bb45));bbr bbe bb1262 bbq((bb39 bb26,bbe bb126, bbe bb303));bbr bbe bb1226 bbq((bb39 bb26,bb191 bb42,bbt bb22));bbr bbe bb1222 bbq((bb39 bb26,bbh bb191 bb42,bbt bb22));bbr bbe bb1248 bbq ((bb39 bb26,bbh bbl*bb1265,...));bbr bbe bb1223 bbq((bb39 bb26,bbh bbl *bbg));bbr bbl*bb1270 bbq((bb39 bb26,bbl*bb42,bbe bb22));bbr bbe bb1242 bbq((bb39 bb26,bbe bbn));bbr bbe bb1271 bbq((bb39 bb26));bbr bbe bb1259 bbq((bb39 bb26,bbe bb176));bbr bb8 bb1228 bbq((bb39 bb26, bb8 bb97,bbe bb1233));bbr bbe bb1269 bbq((bb39 bb26));bbr bb8 bb1244 bbq((bb39 bb26));bbr bbe bb1231 bbq((bb39 bb26));bbr bbe bb1234 bbq(( bb39 bb26));bbr bbh bbl*bb1220 bbq((bb39 bb26,bbe*bb1267));bbr bb25 bb1018 bbq((bb25 bb377,bbh bb34*bb42,bb9 bb22));bbr bb25 bb1206 bbq(( bb25 bb391,bbh bb34*bb42,bb9 bb22));bbr bbe bb1149 bbq((bb16 bb15,bbe bb126,bbh bbl*bb195,bbe bb196));bbr bbe bb1158 bbq((bb16 bb15,bbh bbl *bb195,bbe bb196));bbr bbe bb1124 bbq((bb16 bb15,bbe bb126,bbe bb590 ,bbe bb466,bbe bb967,bbe bb303,bbh bbl*bb195,bbe bb196));bbr bbe bb1122 bbq((bb16 bb15,bbe bb466,bbh bbl*bb195,bbe bb196));bbj bb390{ bbe bb460;};bbr bbh bbl*bb1209 bbq((bbe bb18));bbr bbe bb1214 bbq(( bb16 bb0));bbr bbh bb167*bb1204 bbq((bbb)); #ifdef __cplusplus } #endif bb40 bbh bb167 bb1540[256 ]={0x00000000L ,0x77073096L ,0xee0e612cL , 0x990951baL ,0x076dc419L ,0x706af48fL ,0xe963a535L ,0x9e6495a3L , 0x0edb8832L ,0x79dcb8a4L ,0xe0d5e91eL ,0x97d2d988L ,0x09b64c2bL , 0x7eb17cbdL ,0xe7b82d07L ,0x90bf1d91L ,0x1db71064L ,0x6ab020f2L , 0xf3b97148L ,0x84be41deL ,0x1adad47dL ,0x6ddde4ebL ,0xf4d4b551L , 0x83d385c7L ,0x136c9856L ,0x646ba8c0L ,0xfd62f97aL ,0x8a65c9ecL , 0x14015c4fL ,0x63066cd9L ,0xfa0f3d63L ,0x8d080df5L ,0x3b6e20c8L , 0x4c69105eL ,0xd56041e4L ,0xa2677172L 
,0x3c03e4d1L ,0x4b04d447L , 0xd20d85fdL ,0xa50ab56bL ,0x35b5a8faL ,0x42b2986cL ,0xdbbbc9d6L , 0xacbcf940L ,0x32d86ce3L ,0x45df5c75L ,0xdcd60dcfL ,0xabd13d59L , 0x26d930acL ,0x51de003aL ,0xc8d75180L ,0xbfd06116L ,0x21b4f4b5L , 0x56b3c423L ,0xcfba9599L ,0xb8bda50fL ,0x2802b89eL ,0x5f058808L , 0xc60cd9b2L ,0xb10be924L ,0x2f6f7c87L ,0x58684c11L ,0xc1611dabL , 0xb6662d3dL ,0x76dc4190L ,0x01db7106L ,0x98d220bcL ,0xefd5102aL , 0x71b18589L ,0x06b6b51fL ,0x9fbfe4a5L ,0xe8b8d433L ,0x7807c9a2L , 0x0f00f934L ,0x9609a88eL ,0xe10e9818L ,0x7f6a0dbbL ,0x086d3d2dL , 0x91646c97L ,0xe6635c01L ,0x6b6b51f4L ,0x1c6c6162L ,0x856530d8L , 0xf262004eL ,0x6c0695edL ,0x1b01a57bL ,0x8208f4c1L ,0xf50fc457L , 0x65b0d9c6L ,0x12b7e950L ,0x8bbeb8eaL ,0xfcb9887cL ,0x62dd1ddfL , 0x15da2d49L ,0x8cd37cf3L ,0xfbd44c65L ,0x4db26158L ,0x3ab551ceL , 0xa3bc0074L ,0xd4bb30e2L ,0x4adfa541L ,0x3dd895d7L ,0xa4d1c46dL , 0xd3d6f4fbL ,0x4369e96aL ,0x346ed9fcL ,0xad678846L ,0xda60b8d0L , 0x44042d73L ,0x33031de5L ,0xaa0a4c5fL ,0xdd0d7cc9L ,0x5005713cL , 0x270241aaL ,0xbe0b1010L ,0xc90c2086L ,0x5768b525L ,0x206f85b3L , 0xb966d409L ,0xce61e49fL ,0x5edef90eL ,0x29d9c998L ,0xb0d09822L , 0xc7d7a8b4L ,0x59b33d17L ,0x2eb40d81L ,0xb7bd5c3bL ,0xc0ba6cadL , 0xedb88320L ,0x9abfb3b6L ,0x03b6e20cL ,0x74b1d29aL ,0xead54739L , 0x9dd277afL ,0x04db2615L ,0x73dc1683L ,0xe3630b12L ,0x94643b84L , 0x0d6d6a3eL ,0x7a6a5aa8L ,0xe40ecf0bL ,0x9309ff9dL ,0x0a00ae27L , 0x7d079eb1L ,0xf00f9344L ,0x8708a3d2L ,0x1e01f268L ,0x6906c2feL , 0xf762575dL ,0x806567cbL ,0x196c3671L ,0x6e6b06e7L ,0xfed41b76L , 0x89d32be0L ,0x10da7a5aL ,0x67dd4accL ,0xf9b9df6fL ,0x8ebeeff9L , 0x17b7be43L ,0x60b08ed5L ,0xd6d6a3e8L ,0xa1d1937eL ,0x38d8c2c4L , 0x4fdff252L ,0xd1bb67f1L ,0xa6bc5767L ,0x3fb506ddL ,0x48b2364bL , 0xd80d2bdaL ,0xaf0a1b4cL ,0x36034af6L ,0x41047a60L ,0xdf60efc3L , 0xa867df55L ,0x316e8eefL ,0x4669be79L ,0xcb61b38cL ,0xbc66831aL , 0x256fd2a0L ,0x5268e236L ,0xcc0c7795L ,0xbb0b4703L ,0x220216b9L , 0x5505262fL ,0xc5ba3bbeL ,0xb2bd0b28L ,0x2bb45a92L 
,0x5cb36a04L , 0xc2d7ffa7L ,0xb5d0cf31L ,0x2cd99e8bL ,0x5bdeae1dL ,0x9b64c2b0L , 0xec63f226L ,0x756aa39cL ,0x026d930aL ,0x9c0906a9L ,0xeb0e363fL , 0x72076785L ,0x05005713L ,0x95bf4a82L ,0xe2b87a14L ,0x7bb12baeL , 0x0cb61b38L ,0x92d28e9bL ,0xe5d5be0dL ,0x7cdcefb7L ,0x0bdbdf21L , 0x86d3d2d4L ,0xf1d4e242L ,0x68ddb3f8L ,0x1fda836eL ,0x81be16cdL , 0xf6b9265bL ,0x6fb077e1L ,0x18b74777L ,0x88085ae6L ,0xff0f6a70L , 0x66063bcaL ,0x11010b5cL ,0x8f659effL ,0xf862ae69L ,0x616bffd3L , 0x166ccf45L ,0xa00ae278L ,0xd70dd2eeL ,0x4e048354L ,0x3903b3c2L , 0xa7672661L ,0xd06016f7L ,0x4969474dL ,0x3e6e77dbL ,0xaed16a4aL , 0xd9d65adcL ,0x40df0b66L ,0x37d83bf0L ,0xa9bcae53L ,0xdebb9ec5L , 0x47b2cf7fL ,0x30b5ffe9L ,0xbdbdf21cL ,0xcabac28aL ,0x53b39330L , 0x24b4a3a6L ,0xbad03605L ,0xcdd70693L ,0x54de5729L ,0x23d967bfL , 0xb3667a2eL ,0xc4614ab8L ,0x5d681b02L ,0x2a6f2b94L ,0xb40bbe37L , 0xc30c8ea1L ,0x5a05df1bL ,0x2d02ef8dL };bbh bb167*bb1204(){bb4(bbh bb167 * )bb1540;}bb25 bb1206(bb391,bb42,bb22)bb25 bb391;bbh bb34*bb42;bb9 bb22;{bbm(bb42==0 )bb4 0L ;bb391=bb391^0xffffffffL ;bb109(bb22>=8 ){bb391 =bb1540[((bbe)bb391^( *bb42++))&0xff ]^(bb391>>8 );;bb391=bb1540[((bbe)bb391 ^( *bb42++))&0xff ]^(bb391>>8 );;;bb391=bb1540[((bbe)bb391^( *bb42++))& 0xff ]^(bb391>>8 );;bb391=bb1540[((bbe)bb391^( *bb42++))&0xff ]^(bb391>> 8 );;;;bb391=bb1540[((bbe)bb391^( *bb42++))&0xff ]^(bb391>>8 );;bb391= bb1540[((bbe)bb391^( *bb42++))&0xff ]^(bb391>>8 );;;bb391=bb1540[((bbe)bb391 ^( *bb42++))&0xff ]^(bb391>>8 );;bb391=bb1540[((bbe)bb391^( *bb42++))& 0xff ]^(bb391>>8 );;;;;bb22-=8 ;}bbm(bb22)bb599{bb391=bb1540[((bbe)bb391 ^( *bb42++))&0xff ]^(bb391>>8 );;}bb109(--bb22);bb4 bb391^0xffffffffL ;}
gpl-2.0
androidarmv6/android_kernel_htc_msm7x27
drivers/usb/serial/mos7840.c
806
78452
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Clean ups from Moschip version and a few ioctl implementations by: * Paul B Schroeder <pschroeder "at" uplogix "dot" com> * * Originally based on drivers/usb/serial/io_edgeport.c which is: * Copyright (C) 2000 Inside Out Networks, All rights reserved. * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> /* * Version Information */ #define DRIVER_VERSION "1.3.2" #define DRIVER_DESC "Moschip 7840/7820 USB Serial Driver" /* * 16C50 UART register defines */ #define LCR_BITS_5 0x00 /* 5 bits/char */ #define LCR_BITS_6 0x01 /* 6 bits/char */ #define LCR_BITS_7 0x02 /* 7 bits/char */ #define LCR_BITS_8 0x03 /* 8 bits/char */ #define LCR_BITS_MASK 0x03 /* Mask for bits/char field */ #define LCR_STOP_1 0x00 /* 1 stop bit */ #define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */ #define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */ #define LCR_STOP_MASK 0x04 /* Mask for stop bits field */ #define LCR_PAR_NONE 0x00 /* No parity */ #define LCR_PAR_ODD 
0x08 /* Odd parity */ #define LCR_PAR_EVEN 0x18 /* Even parity */ #define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */ #define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */ #define LCR_PAR_MASK 0x38 /* Mask for parity field */ #define LCR_SET_BREAK 0x40 /* Set Break condition */ #define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */ #define MCR_DTR 0x01 /* Assert DTR */ #define MCR_RTS 0x02 /* Assert RTS */ #define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */ #define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */ #define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */ #define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */ #define MOS7840_MSR_CTS 0x10 /* Current state of CTS */ #define MOS7840_MSR_DSR 0x20 /* Current state of DSR */ #define MOS7840_MSR_RI 0x40 /* Current state of RI */ #define MOS7840_MSR_CD 0x80 /* Current state of CD */ /* * Defines used for sending commands to port */ #define WAIT_FOR_EVER (HZ * 0) /* timeout urb is wait for ever */ #define MOS_WDR_TIMEOUT (HZ * 5) /* default urb timeout */ #define MOS_PORT1 0x0200 #define MOS_PORT2 0x0300 #define MOS_VENREG 0x0000 #define MOS_MAX_PORT 0x02 #define MOS_WRITE 0x0E #define MOS_READ 0x0D /* Requests */ #define MCS_RD_RTYPE 0xC0 #define MCS_WR_RTYPE 0x40 #define MCS_RDREQ 0x0D #define MCS_WRREQ 0x0E #define MCS_CTRL_TIMEOUT 500 #define VENDOR_READ_LENGTH (0x01) #define MAX_NAME_LEN 64 #define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */ #define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */ /* For higher baud Rates use TIOCEXBAUD */ #define TIOCEXBAUD 0x5462 /* vendor id and device id defines */ /* The native mos7840/7820 component */ #define USB_VENDOR_ID_MOSCHIP 0x9710 #define MOSCHIP_DEVICE_ID_7840 0x7840 #define MOSCHIP_DEVICE_ID_7820 0x7820 /* The native component can have its vendor/device id's overridden * in vendor-specific implementations. 
Such devices can be handled * by making a change here, in moschip_port_id_table, and in * moschip_id_table_combined */ #define USB_VENDOR_ID_BANDB 0x0856 #define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 #define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 #define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 #define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 #define BANDB_DEVICE_ID_US9ML2_2 0xAC29 #define BANDB_DEVICE_ID_US9ML2_4 0xAC30 #define BANDB_DEVICE_ID_USPTL4_2 0xAC31 #define BANDB_DEVICE_ID_USPTL4_4 0xAC32 #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 * ATEN UC2322 device using Moschip MCS7820 */ #define USB_VENDOR_ID_ATENINTL 0x0557 #define ATENINTL_DEVICE_ID_UC2324 0x2011 #define ATENINTL_DEVICE_ID_UC2322 0x7820 /* Interrupt Routine Defines */ #define SERIAL_IIR_RLS 0x06 #define SERIAL_IIR_MS 0x00 /* * Emulation of the bit mask on the LINE STATUS REGISTER. 
 */
#define SERIAL_LSR_DR		0x0001	/* data ready */
#define SERIAL_LSR_OE		0x0002	/* overrun error */
#define SERIAL_LSR_PE		0x0004	/* parity error */
#define SERIAL_LSR_FE		0x0008	/* framing error */
#define SERIAL_LSR_BI		0x0010	/* break interrupt */

/* Modem status register delta bits */
#define MOS_MSR_DELTA_CTS	0x10
#define MOS_MSR_DELTA_DSR	0x20
#define MOS_MSR_DELTA_RI	0x40
#define MOS_MSR_DELTA_CD	0x80

/* Serial Port register Address */
#define INTERRUPT_ENABLE_REGISTER	((__u16)(0x01))
#define FIFO_CONTROL_REGISTER		((__u16)(0x02))
#define LINE_CONTROL_REGISTER		((__u16)(0x03))
#define MODEM_CONTROL_REGISTER		((__u16)(0x04))
#define LINE_STATUS_REGISTER		((__u16)(0x05))
#define MODEM_STATUS_REGISTER		((__u16)(0x06))
#define SCRATCH_PAD_REGISTER		((__u16)(0x07))
#define DIVISOR_LATCH_LSB		((__u16)(0x00))
#define DIVISOR_LATCH_MSB		((__u16)(0x01))

#define CLK_MULTI_REGISTER		((__u16)(0x02))
#define CLK_START_VALUE_REGISTER	((__u16)(0x03))

#define SERIAL_LCR_DLAB			((__u16)(0x0080))

/*
 * URB POOL related defines
 */
#define NUM_URBS			16	/* URB Count */
#define URB_TRANSFER_BUFFER_SIZE	32	/* URB Size  */

/* Devices probed by this driver (hot-plug match table) */
static const struct usb_device_id moschip_port_id_table[] = {
	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
	{}			/* terminating entry */
};

/* Same list again for MODULE_DEVICE_TABLE (module autoload aliases) */
static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
	{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
	{}			/* terminating entry */
};

MODULE_DEVICE_TABLE(usb, moschip_id_table_combined);

/* This structure holds all of the local port information */
struct moschip_port {
	int port_num;		/*Actual port number in the device(1,2,etc) */
	struct urb *write_urb;	/* write URB for this port */
	struct urb *read_urb;	/* read URB for this port */
	struct urb *int_urb;
	__u8 shadowLCR;		/* last LCR value received */
	__u8 shadowMCR;		/* last MCR value received */
	char open;
	char open_ports;
	char zombie;
	wait_queue_head_t wait_chase;	/* for handling sleeping while waiting for chase to finish */
	wait_queue_head_t delta_msr_wait;	/* for handling sleeping while waiting for msr change to happen */
	int delta_msr_cond;
	struct async_icount icount;
	struct
 usb_serial_port *port;	/* loop back to the owner of this object */

	/* Offsets */
	__u8 SpRegOffset;
	__u8 ControlRegOffset;
	__u8 DcrRegOffset;
	/* for processing control URBS in interrupt context */
	struct urb *control_urb;
	struct usb_ctrlrequest *dr;
	char *ctrl_buf;
	int MsrLsr;

	spinlock_t pool_lock;
	struct urb *write_urb_pool[NUM_URBS];
	char busy[NUM_URBS];
	bool read_urb_busy;
};

static int debug;

/*
 * mos7840_set_reg_sync
 *	To set the Control register by calling usb_fill_control_urb function
 *	by passing usb_sndctrlpipe function as parameter.
 */
/* Synchronous write of one 8-bit device (non-UART) register; may sleep. */
static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
				__u16 val)
{
	struct usb_device *dev = port->serial->dev;
	val = val & 0x00ff;	/* registers are 8 bits wide */
	dbg("mos7840_set_reg_sync offset is %x, value %x", reg, val);

	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
			       MCS_WR_RTYPE, val, reg, NULL, 0,
			       MOS_WDR_TIMEOUT);
}

/*
 * mos7840_get_reg_sync
 *	To set the Uart register by calling usb_fill_control_urb function by
 *	passing usb_rcvctrlpipe function as parameter.
 */
/* Synchronous read of one device register into *val; may sleep.
 * NOTE(review): *val is taken from buf[0] even if usb_control_msg()
 * failed or transferred nothing — caller should check the return code. */
static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
				__u16 *val)
{
	struct usb_device *dev = port->serial->dev;
	int ret = 0;
	u8 *buf;

	buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
			      MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
			      MOS_WDR_TIMEOUT);
	*val = buf[0];
	dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);

	kfree(buf);
	return ret;
}

/*
 * mos7840_set_uart_reg
 *	To set the Uart register by calling usb_fill_control_urb function by
 *	passing usb_sndctrlpipe function as parameter.
 */
/* Synchronous write of a per-port UART register; the port ("application")
 * number is encoded into the high byte of wValue. */
static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
				__u16 val)
{

	struct usb_device *dev = port->serial->dev;
	val = val & 0x00ff;
	/* For the UART control registers, the application number need
	   to be Or'ed */
	if (port->serial->num_ports == 4) {
		val |= (((__u16) port->number -
			 (__u16) (port->serial->minor)) + 1) << 8;
		dbg("mos7840_set_uart_reg application number is %x", val);
	} else {
		/* 2-port device: second port maps to application number 3,
		 * not 2 — hence the +2 below */
		if (((__u16) port->number -
		     (__u16) (port->serial->minor)) == 0) {
			val |= (((__u16) port->number -
				 (__u16) (port->serial->minor)) + 1) << 8;
			dbg("mos7840_set_uart_reg application number is %x",
			    val);
		} else {
			val |= (((__u16) port->number -
				 (__u16) (port->serial->minor)) + 2) << 8;
			dbg("mos7840_set_uart_reg application number is %x",
			    val);
		}
	}
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
			       MCS_WR_RTYPE, val, reg, NULL, 0,
			       MOS_WDR_TIMEOUT);

}

/*
 * mos7840_get_uart_reg
 *	To set the Control register by calling usb_fill_control_urb function
 *	by passing usb_rcvctrlpipe function as parameter.
 */
/* Synchronous read of a per-port UART register; Wval carries the
 * port ("application") number in its high byte, same mapping as
 * mos7840_set_uart_reg(). */
static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
				__u16 *val)
{
	struct usb_device *dev = port->serial->dev;
	int ret = 0;
	__u16 Wval;
	u8 *buf;

	buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* dbg("application number is %4x",
	    (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
	/* Wval  is same as application number */
	if (port->serial->num_ports == 4) {
		Wval = (((__u16) port->number -
			 (__u16) (port->serial->minor)) + 1) << 8;
		dbg("mos7840_get_uart_reg application number is %x", Wval);
	} else {
		if (((__u16) port->number -
		     (__u16) (port->serial->minor)) == 0) {
			Wval = (((__u16) port->number -
				 (__u16) (port->serial->minor)) + 1) << 8;
			dbg("mos7840_get_uart_reg application number is %x",
			    Wval);
		} else {
			Wval = (((__u16) port->number -
				 (__u16) (port->serial->minor)) + 2) << 8;
			dbg("mos7840_get_uart_reg application number is %x",
			    Wval);
		}
	}
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
			      MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
			      MOS_WDR_TIMEOUT);
	*val = buf[0];

	kfree(buf);
	return ret;
}

/* Debug helper: dump the per-port register offsets. */
static void mos7840_dump_serial_port(struct moschip_port *mos7840_port)
{

	dbg("***************************************");
	dbg("SpRegOffset is %2x", mos7840_port->SpRegOffset);
	dbg("ControlRegOffset is %2x", mos7840_port->ControlRegOffset);
	dbg("DCRRegOffset is %2x", mos7840_port->DcrRegOffset);
	dbg("***************************************");

}

/************************************************************************/
/************************************************************************/
/*             I N T E R F A C E   F U N C T I O N S			*/
/*             I N T E R F A C E   F U N C T I O N S			*/
/************************************************************************/
/************************************************************************/

/* Stash/retrieve the driver-private moschip_port on the usb_serial_port. */
static inline void mos7840_set_port_private(struct usb_serial_port *port,
					    struct moschip_port *data)
{
	usb_set_serial_port_data(port, (void *)data);
}

static inline struct moschip_port
    *mos7840_get_port_private(struct usb_serial_port *port)
{
	return (struct moschip_port *)usb_get_serial_port_data(port);
}

/* Fold a Modem Status Register delta byte into the port's icount stats. */
static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
{
	struct moschip_port *mos7840_port;
	struct async_icount *icount;
	mos7840_port = port;
	icount = &mos7840_port->icount;
	if (new_msr &
	    (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
	     MOS_MSR_DELTA_CD)) {
		icount = &mos7840_port->icount;

		/* update input line counters */
		if (new_msr & MOS_MSR_DELTA_CTS) {
			icount->cts++;
			smp_wmb();
		}
		if (new_msr & MOS_MSR_DELTA_DSR) {
			icount->dsr++;
			smp_wmb();
		}
		if (new_msr & MOS_MSR_DELTA_CD) {
			icount->dcd++;
			smp_wmb();
		}
		if (new_msr & MOS_MSR_DELTA_RI) {
			icount->rng++;
			smp_wmb();
		}
	}
}

/* Fold a Line Status Register byte into the port's icount error stats. */
static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
{
	struct async_icount *icount;

	dbg("%s - %02x", __func__, new_lsr);

	if (new_lsr & SERIAL_LSR_BI) {
		/*
		 * Parity and Framing errors only count if they
		 * occur exclusive of a break being
		 * received.
*/ new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI); } /* update input line counters */ icount = &port->icount; if (new_lsr & SERIAL_LSR_BI) { icount->brk++; smp_wmb(); } if (new_lsr & SERIAL_LSR_OE) { icount->overrun++; smp_wmb(); } if (new_lsr & SERIAL_LSR_PE) { icount->parity++; smp_wmb(); } if (new_lsr & SERIAL_LSR_FE) { icount->frame++; smp_wmb(); } } /************************************************************************/ /************************************************************************/ /* U S B C A L L B A C K F U N C T I O N S */ /* U S B C A L L B A C K F U N C T I O N S */ /************************************************************************/ /************************************************************************/ static void mos7840_control_callback(struct urb *urb) { unsigned char *data; struct moschip_port *mos7840_port; __u8 regval = 0x0; int result = 0; int status = urb->status; mos7840_port = urb->context; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); goto exit; } dbg("%s urb buffer size is %d", __func__, urb->actual_length); dbg("%s mos7840_port->MsrLsr is %d port %d", __func__, mos7840_port->MsrLsr, mos7840_port->port_num); data = urb->transfer_buffer; regval = (__u8) data[0]; dbg("%s data is %x", __func__, regval); if (mos7840_port->MsrLsr == 0) mos7840_handle_new_msr(mos7840_port, regval); else if (mos7840_port->MsrLsr == 1) mos7840_handle_new_lsr(mos7840_port, regval); exit: spin_lock(&mos7840_port->pool_lock); if (!mos7840_port->zombie) result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC); spin_unlock(&mos7840_port->pool_lock); if (result) { dev_err(&urb->dev->dev, "%s - Error %d submitting interrupt urb\n", __func__, result); } } static int mos7840_get_reg(struct moschip_port *mcs, __u16 
Wval, __u16 reg, __u16 *val) { struct usb_device *dev = mcs->port->serial->dev; struct usb_ctrlrequest *dr = mcs->dr; unsigned char *buffer = mcs->ctrl_buf; int ret; dr->bRequestType = MCS_RD_RTYPE; dr->bRequest = MCS_RDREQ; dr->wValue = cpu_to_le16(Wval); /* 0 */ dr->wIndex = cpu_to_le16(reg); dr->wLength = cpu_to_le16(2); usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0), (unsigned char *)dr, buffer, 2, mos7840_control_callback, mcs); mcs->control_urb->transfer_buffer_length = 2; ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC); return ret; } /***************************************************************************** * mos7840_interrupt_callback * this is the callback function for when we have received data on the * interrupt endpoint. *****************************************************************************/ static void mos7840_interrupt_callback(struct urb *urb) { int result; int length; struct moschip_port *mos7840_port; struct usb_serial *serial; __u16 Data; unsigned char *data; __u8 sp[5], st; int i, rv = 0; __u16 wval, wreg = 0; int status = urb->status; dbg("%s", " : Entering"); switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, status); goto exit; } length = urb->actual_length; data = urb->transfer_buffer; serial = urb->context; /* Moschip get 5 bytes * Byte 1 IIR Port 1 (port.number is 0) * Byte 2 IIR Port 2 (port.number is 1) * Byte 3 IIR Port 3 (port.number is 2) * Byte 4 IIR Port 4 (port.number is 3) * Byte 5 FIFO status for both */ if (length && length > 5) { dbg("%s", "Wrong data !!!"); return; } sp[0] = (__u8) data[0]; sp[1] = (__u8) data[1]; sp[2] = (__u8) data[2]; sp[3] = (__u8) data[3]; st = (__u8) data[4]; for (i = 0; i < serial->num_ports; i++) { mos7840_port = 
		    mos7840_get_port_private(serial->port[i]);
		wval = (((__u16) serial->port[i]->number -
			 (__u16) (serial->minor)) + 1) << 8;
		if (mos7840_port->open) {
			if (sp[i] & 0x01) {
				dbg("SP%d No Interrupt !!!", i);
			} else {
				switch (sp[i] & 0x0f) {
				case SERIAL_IIR_RLS:
					dbg("Serial Port %d: Receiver status error or ", i);
					dbg("address bit detected in 9-bit mode");
					/* ask for LSR via async control read */
					mos7840_port->MsrLsr = 1;
					wreg = LINE_STATUS_REGISTER;
					break;
				case SERIAL_IIR_MS:
					dbg("Serial Port %d: Modem status change", i);
					/* ask for MSR via async control read */
					mos7840_port->MsrLsr = 0;
					wreg = MODEM_STATUS_REGISTER;
					break;
				}
				spin_lock(&mos7840_port->pool_lock);
				if (!mos7840_port->zombie) {
					rv = mos7840_get_reg(mos7840_port,
							     wval, wreg,
							     &Data);
				} else {
					spin_unlock(&mos7840_port->pool_lock);
					return;
				}
				spin_unlock(&mos7840_port->pool_lock);
			}
		}
	}
	if (!(rv < 0))
		/* the completion handler for the control urb will resubmit */
		return;
exit:
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&urb->dev->dev,
			"%s - Error %d submitting interrupt urb\n",
			__func__, result);
	}
}

/* Sanity-check a usb_serial_port pointer; returns 0 if usable, -1 if not. */
static int mos7840_port_paranoia_check(struct usb_serial_port *port,
				       const char *function)
{
	if (!port) {
		dbg("%s - port == NULL", function);
		return -1;
	}
	if (!port->serial) {
		dbg("%s - port->serial == NULL", function);
		return -1;
	}

	return 0;
}

/* Inline functions to check the sanity of a pointer that is passed to us */
static int mos7840_serial_paranoia_check(struct usb_serial *serial,
					 const char *function)
{
	if (!serial) {
		dbg("%s - serial == NULL", function);
		return -1;
	}
	if (!serial->type) {
		dbg("%s - serial->type == NULL!", function);
		return -1;
	}

	return 0;
}

/* Return the owning usb_serial after paranoia checks, or NULL. */
static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
						 const char *function)
{
	/* if no port was specified, or it fails a paranoia check */
	if (!port ||
	    mos7840_port_paranoia_check(port, function) ||
	    mos7840_serial_paranoia_check(port->serial, function)) {
		/* then say that we don't have a valid usb_serial thing,
		 * which will end up genrating -ENODEV return values */
		return NULL;
	}

	return port->serial;
}

/*****************************************************************************
 * mos7840_bulk_in_callback
 *	this is the callback function for when we have received data on the
 *	bulk in endpoint.
 *****************************************************************************/
static void mos7840_bulk_in_callback(struct urb *urb)
{
	int retval;
	unsigned char *data;
	struct usb_serial *serial;
	struct usb_serial_port *port;
	struct moschip_port *mos7840_port;
	struct tty_struct *tty;
	int status = urb->status;

	mos7840_port = urb->context;
	if (!mos7840_port) {
		dbg("%s", "NULL mos7840_port pointer");
		return;
	}

	if (status) {
		dbg("nonzero read bulk status received: %d", status);
		mos7840_port->read_urb_busy = false;
		return;
	}

	port = (struct usb_serial_port *)mos7840_port->port;
	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Port Paranoia failed");
		mos7840_port->read_urb_busy = false;
		return;
	}

	serial = mos7840_get_usb_serial(port, __func__);
	if (!serial) {
		dbg("%s", "Bad serial pointer");
		mos7840_port->read_urb_busy = false;
		return;
	}

	dbg("%s", "Entering... 
");
	data = urb->transfer_buffer;

	dbg("%s", "Entering ...........");

	if (urb->actual_length) {
		/* push the received bytes to the tty layer */
		tty = tty_port_tty_get(&mos7840_port->port->port);
		if (tty) {
			tty_insert_flip_string(tty, data, urb->actual_length);
			dbg(" %s ", data);
			tty_flip_buffer_push(tty);
			tty_kref_put(tty);
		}
		mos7840_port->icount.rx += urb->actual_length;
		smp_wmb();
		dbg("mos7840_port->icount.rx is %d:", mos7840_port->icount.rx);
	}

	if (!mos7840_port->read_urb) {
		dbg("%s", "URB KILLED !!!");
		mos7840_port->read_urb_busy = false;
		return;
	}

	mos7840_port->read_urb->dev = serial->dev;

	/* resubmit so reads continue as long as the port is open */
	mos7840_port->read_urb_busy = true;
	retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);

	if (retval) {
		dbg("usb_submit_urb(read bulk) failed, retval = %d", retval);
		mos7840_port->read_urb_busy = false;
	}
}

/*****************************************************************************
 * mos7840_bulk_out_data_callback
 *	this is the callback function for when we have finished sending
 *	serial data on the bulk out endpoint.
 *****************************************************************************/
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
	struct moschip_port *mos7840_port;
	struct tty_struct *tty;
	int status = urb->status;
	int i;

	mos7840_port = urb->context;
	/* mark this pool slot free again */
	spin_lock(&mos7840_port->pool_lock);
	for (i = 0; i < NUM_URBS; i++) {
		if (urb == mos7840_port->write_urb_pool[i]) {
			mos7840_port->busy[i] = 0;
			break;
		}
	}
	spin_unlock(&mos7840_port->pool_lock);

	if (status) {
		dbg("nonzero write bulk status received:%d", status);
		return;
	}

	if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) {
		dbg("%s", "Port Paranoia failed");
		return;
	}

	dbg("%s", "Entering .........");

	tty = tty_port_tty_get(&mos7840_port->port->port);

	/* room freed in the pool: let writers retry */
	if (tty && mos7840_port->open)
		tty_wakeup(tty);
	tty_kref_put(tty);

}

/************************************************************************/
/*       D R I V E R  T T Y  I N T E R F A C E  F U N C T I O N S       */
/************************************************************************/
#ifdef MCSSerialProbe
static int mos7840_serial_probe(struct usb_serial *serial,
				const struct usb_device_id *id)
{

	/*need to implement the mode_reg reading and updating\
	   structures usb_serial_ device_type\
	   (i.e num_ports, num_bulkin,bulkout etc) */
	/* Also we can update the changes  attach */
	return 1;
}
#endif

/*****************************************************************************
 * mos7840_open
 *	this function is called by the tty driver when a port is opened
 *	If successful, we return 0
 *	Otherwise we return a negative error number.
 *****************************************************************************/
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int response;
	int j;
	struct usb_serial *serial;
	struct urb *urb;
	__u16 Data;
	int status;
	struct moschip_port *mos7840_port;
	struct moschip_port *port0;

	dbg ("%s enter", __func__);

	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Port Paranoia failed");
		return -ENODEV;
	}

	serial = port->serial;

	if (mos7840_serial_paranoia_check(serial, __func__)) {
		dbg("%s", "Serial Paranoia failed");
		return -ENODEV;
	}

	mos7840_port = mos7840_get_port_private(port);
	port0 = mos7840_get_port_private(serial->port[0]);

	if (mos7840_port == NULL || port0 == NULL)
		return -ENODEV;

	usb_clear_halt(serial->dev, port->write_urb->pipe);
	usb_clear_halt(serial->dev, port->read_urb->pipe);
	/* open_ports is tracked on port 0 for the whole device */
	port0->open_ports++;

	/* Initialising the write urb pool */
	for (j = 0; j < NUM_URBS; ++j) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		mos7840_port->write_urb_pool[j] = urb;

		if (urb == NULL) {
			dev_err(&port->dev, "No more urbs???\n");
			continue;
		}

		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
					       GFP_KERNEL);
		if (!urb->transfer_buffer) {
			usb_free_urb(urb);
			mos7840_port->write_urb_pool[j] = NULL;
			dev_err(&port->dev,
				"%s-out of memory for urb buffers.\n",
				__func__);
			continue;
		}
	}

/*****************************************************************************
 * Initialize MCS7840 -- Write Init values to corresponding 
Registers
 *
 *	Register Index
 *	1 : IER
 *	2 : FCR
 *	3 : LCR
 *	4 : MCR
 *
 *	0x08 : SP1/2 Control Reg
 *****************************************************************************/

	/* NEED to check the following Block */

	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
	if (status < 0) {
		dbg("Reading Spreg failed");
		return -1;
	}
	/* pulse bit 7 of the SP control register (reset pulse) */
	Data |= 0x80;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	if (status < 0) {
		dbg("writing Spreg failed");
		return -1;
	}

	Data &= ~0x80;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	if (status < 0) {
		dbg("writing Spreg failed");
		return -1;
	}
	/* End of block to be checked */

	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	if (status < 0) {
		dbg("Reading Controlreg failed");
		return -1;
	}
	Data |= 0x08;		/* Driver done bit */
	Data |= 0x20;		/* rx_disable */
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);
	if (status < 0) {
		dbg("writing Controlreg failed");
		return -1;
	}
	/* do register settings here */
	/* Set all regs to the device default values. */
	/***********************************
	 * First Disable all interrupts.
	 ***********************************/
	Data = 0x00;
	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
	if (status < 0) {
		dbg("disabling interrupts failed");
		return -1;
	}
	/* Set FIFO_CONTROL_REGISTER to the default value */
	Data = 0x00;
	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
	if (status < 0) {
		dbg("Writing FIFO_CONTROL_REGISTER  failed");
		return -1;
	}

	Data = 0xcf;
	status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
	if (status < 0) {
		dbg("Writing FIFO_CONTROL_REGISTER  failed");
		return -1;
	}

	Data = 0x03;		/* LCR: 8 data bits, no parity, 1 stop */
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
	mos7840_port->shadowLCR = Data;

	Data = 0x0b;		/* MCR: DTR|RTS|OUT2 */
	status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
	mos7840_port->shadowMCR = Data;

	Data = 0x00;
	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
	mos7840_port->shadowLCR = Data;

	Data |= SERIAL_LCR_DLAB;	/* data latch enable in LCR 0x80 */
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);

	Data = 0x0c;		/* divisor latch low byte */
	status = mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);

	Data = 0x0;		/* divisor latch high byte */
	status = mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);

	Data = 0x00;
	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);

	Data = Data & ~SERIAL_LCR_DLAB;
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
	mos7840_port->shadowLCR = Data;

	/* clearing Bulkin and Bulkout Fifo */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);

	Data = Data | 0x0c;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);

	Data = Data & ~0x0c;
	status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
	/* Finally enable all interrupts */
	Data = 0x0c;
	status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);

	/* clearing rx_disable */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	Data = Data & ~0x20;
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);

	/* rx_negate */
	Data = 0x0;
	status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
				      &Data);
	Data = Data | 0x10;
	status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
				      Data);

	/* Check to see if we've set up our endpoint info yet    *
	 * (can't set it up in mos7840_startup as the structures *
	 * were not set up at that time.)                        */
	if (port0->open_ports == 1) {
		if (serial->port[0]->interrupt_in_buffer == NULL) {
			/* set up interrupt urb */
			usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
					 serial->dev,
					 usb_rcvintpipe(serial->dev,
							serial->port[0]->interrupt_in_endpointAddress),
					 serial->port[0]->interrupt_in_buffer,
					 serial->port[0]->interrupt_in_urb->transfer_buffer_length,
					 mos7840_interrupt_callback,
					 serial,
					 serial->port[0]->interrupt_in_urb->interval);

			/* start interrupt read for mos7840               *
			 * will continue as long as mos7840 is connected  */

			response =
			    usb_submit_urb(serial->port[0]->interrupt_in_urb,
					   GFP_KERNEL);
			if (response) {
				dev_err(&port->dev, "%s - Error %d submitting "
					"interrupt urb\n", __func__, response);
			}

		}

	}

	/* see if we've set up our endpoint info yet    *
	 * (can't set it up in mos7840_startup as the  *
	 * structures were not set up at that time.)   */

	dbg("port number is %d", port->number);
	dbg("serial number is %d", port->serial->minor);
	dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
	dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
	dbg("Interrupt endpoint is %d", port->interrupt_in_endpointAddress);
	dbg("port's number in the device is %d", mos7840_port->port_num);
	mos7840_port->read_urb = port->read_urb;

	/* set up our bulk in urb */

	usb_fill_bulk_urb(mos7840_port->read_urb,
			  serial->dev,
			  usb_rcvbulkpipe(serial->dev,
					  port->bulk_in_endpointAddress),
			  port->bulk_in_buffer,
			  mos7840_port->read_urb->transfer_buffer_length,
			  mos7840_bulk_in_callback, mos7840_port);

	dbg("mos7840_open: bulkin endpoint is %d",
	    port->bulk_in_endpointAddress);
	mos7840_port->read_urb_busy = true;
	response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
	if (response) {
		dev_err(&port->dev, "%s - Error %d submitting control urb\n",
			__func__, response);
		mos7840_port->read_urb_busy = false;
	}

	/* initialize our wait queues */
	init_waitqueue_head(&mos7840_port->wait_chase);
	init_waitqueue_head(&mos7840_port->delta_msr_wait);

	/* initialize our icount structure */
	memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));

	/* initialize our port settings */
	/* Must set to enable ints! 
*/ mos7840_port->shadowMCR = MCR_MASTER_IE; /* send a open port command */ mos7840_port->open = 1; /* mos7840_change_port_settings(mos7840_port,old_termios); */ mos7840_port->icount.tx = 0; mos7840_port->icount.rx = 0; dbg("usb_serial serial:%p mos7840_port:%p\n usb_serial_port port:%p", serial, mos7840_port, port); dbg ("%s leave", __func__); return 0; } /***************************************************************************** * mos7840_chars_in_buffer * this function is called by the tty driver when it wants to know how many * bytes of data we currently have outstanding in the port (data that has * been written, but hasn't made it out the port yet) * If successful, we return the number of bytes left to be written in the * system, * Otherwise we return zero. *****************************************************************************/ static int mos7840_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; int i; int chars = 0; unsigned long flags; struct moschip_port *mos7840_port; dbg("%s", " mos7840_chars_in_buffer:entering ..........."); if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return 0; } mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) { dbg("%s", "mos7840_break:leaving ..........."); return 0; } spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) if (mos7840_port->busy[i]) chars += URB_TRANSFER_BUFFER_SIZE; spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; } /***************************************************************************** * mos7840_close * this function is called by the tty driver when a port is closed *****************************************************************************/ static void mos7840_close(struct usb_serial_port *port) { struct usb_serial *serial; struct moschip_port *mos7840_port; struct moschip_port *port0; int j; __u16 Data; 
dbg("%s", "mos7840_close:entering..."); if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Port Paranoia failed"); return; } serial = mos7840_get_usb_serial(port, __func__); if (!serial) { dbg("%s", "Serial Paranoia failed"); return; } mos7840_port = mos7840_get_port_private(port); port0 = mos7840_get_port_private(serial->port[0]); if (mos7840_port == NULL || port0 == NULL) return; for (j = 0; j < NUM_URBS; ++j) usb_kill_urb(mos7840_port->write_urb_pool[j]); /* Freeing Write URBs */ for (j = 0; j < NUM_URBS; ++j) { if (mos7840_port->write_urb_pool[j]) { if (mos7840_port->write_urb_pool[j]->transfer_buffer) kfree(mos7840_port->write_urb_pool[j]-> transfer_buffer); usb_free_urb(mos7840_port->write_urb_pool[j]); } } /* While closing port, shutdown all bulk read, write * * and interrupt read if they exists */ if (serial->dev) { if (mos7840_port->write_urb) { dbg("%s", "Shutdown bulk write"); usb_kill_urb(mos7840_port->write_urb); } if (mos7840_port->read_urb) { dbg("%s", "Shutdown bulk read"); usb_kill_urb(mos7840_port->read_urb); mos7840_port->read_urb_busy = false; } if ((&mos7840_port->control_urb)) { dbg("%s", "Shutdown control read"); /*/ usb_kill_urb (mos7840_port->control_urb); */ } } /* if(mos7840_port->ctrl_buf != NULL) */ /* kfree(mos7840_port->ctrl_buf); */ port0->open_ports--; dbg("mos7840_num_open_ports in close%d:in port%d", port0->open_ports, port->number); if (port0->open_ports == 0) { if (serial->port[0]->interrupt_in_urb) { dbg("%s", "Shutdown interrupt_in_urb"); usb_kill_urb(serial->port[0]->interrupt_in_urb); } } if (mos7840_port->write_urb) { /* if this urb had a transfer buffer already (old tx) free it */ if (mos7840_port->write_urb->transfer_buffer != NULL) kfree(mos7840_port->write_urb->transfer_buffer); usb_free_urb(mos7840_port->write_urb); } Data = 0x0; mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); Data = 0x00; mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); mos7840_port->open = 0; dbg("%s", "Leaving 
............");
}

/************************************************************************
 *
 * mos7840_block_until_chase_response
 *
 *	This function will block the close until one of the following:
 *		1. Response to our Chase comes from mos7840
 *		2. A timeout of 10 seconds without activity has expired
 *		   (1K of mos7840 data @ 2400 baud ==> 4 sec to empty)
 *
 ************************************************************************/
static void mos7840_block_until_chase_response(struct tty_struct *tty,
					       struct moschip_port *mos7840_port)
{
	int timeout = 1 * HZ;
	int wait = 10;
	int count;

	while (1) {
		count = mos7840_chars_in_buffer(tty);

		/* Check for Buffer status */
		if (count <= 0)
			return;

		/* Block the thread for a while */
		/* NOTE(review): interruptible_sleep_on_timeout() is racy and
		 * was deprecated; wait_event_interruptible_timeout() with an
		 * explicit condition would be the modern replacement —
		 * confirm against current kernel APIs before changing. */
		interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
					       timeout);

		/* No activity.. count down section */
		wait--;
		if (wait == 0) {
			dbg("%s - TIMEOUT", __func__);
			return;
		} else {
			/* Reset timeout value back to seconds */
			wait = 10;
		}
	}
}

/*****************************************************************************
 * mos7840_break
 *	this function sends a break to the port
 *****************************************************************************/
static void mos7840_break(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	unsigned char data;
	struct usb_serial *serial;
	struct moschip_port *mos7840_port;

	dbg("%s", "Entering ...........");
	dbg("mos7840_break: Start");

	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Port Paranoia failed");
		return;
	}

	serial = mos7840_get_usb_serial(port, __func__);
	if (!serial) {
		dbg("%s", "Serial Paranoia failed");
		return;
	}

	mos7840_port = mos7840_get_port_private(port);

	if (mos7840_port == NULL)
		return;

	if (serial->dev)
		/* flush and block until tx is empty */
		mos7840_block_until_chase_response(tty, mos7840_port);

	if (break_state == -1)
		data = mos7840_port->shadowLCR | LCR_SET_BREAK;
	else
		data = mos7840_port->shadowLCR & ~LCR_SET_BREAK;

	/* FIXME: no locking on shadowLCR anywhere in driver */
	mos7840_port->shadowLCR = data;
	dbg("mcs7840_break mos7840_port->shadowLCR is %x",
	    mos7840_port->shadowLCR);
	mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
			     mos7840_port->shadowLCR);

	return;
}

/*****************************************************************************
 * mos7840_write_room
 *	this function is called by the tty driver when it wants to know how many
 *	bytes of data we can accept for a specific port.
 *	If successful, we return the amount of room that we have for this port
 *	Otherwise we return a negative error number.
 *****************************************************************************/
static int mos7840_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	int i;
	int room = 0;
	unsigned long flags;
	struct moschip_port *mos7840_port;

	dbg("%s", " mos7840_write_room:entering ...........");

	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Invalid port");
		dbg("%s", " mos7840_write_room:leaving ...........");
		return -1;
	}

	mos7840_port = mos7840_get_port_private(port);
	if (mos7840_port == NULL) {
		dbg("%s", "mos7840_break:leaving ...........");
		return -1;
	}

	/* room = one URB buffer per free pool slot */
	spin_lock_irqsave(&mos7840_port->pool_lock, flags);
	for (i = 0; i < NUM_URBS; ++i) {
		if (!mos7840_port->busy[i])
			room += URB_TRANSFER_BUFFER_SIZE;
	}
	spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);

	room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
	dbg("%s - returns %d", __func__, room);
	return room;
}

/*****************************************************************************
 * mos7840_write
 *	this function is called by the tty driver when data should be written to
 *	the port.
 *	If successful, we return the number of bytes written, otherwise we
 *	return a negative error number.
 *****************************************************************************/

static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
			 const unsigned char *data, int count)
{
	int status;
	int i;
	int bytes_sent = 0;
	int transfer_size;
	unsigned long flags;

	struct moschip_port *mos7840_port;
	struct usb_serial *serial;
	struct urb *urb;
	/* __u16 Data; */
	const unsigned char *current_position = data;
	unsigned char *data1;
	dbg("%s", "entering ...........");
	/* dbg("mos7840_write: mos7840_port->shadowLCR is %x",
	   mos7840_port->shadowLCR); */

#ifdef NOTMOS7840
	Data = 0x00;
	status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
	mos7840_port->shadowLCR = Data;
	dbg("mos7840_write: LINE_CONTROL_REGISTER is %x", Data);
	dbg("mos7840_write: mos7840_port->shadowLCR is %x",
	    mos7840_port->shadowLCR);

	/* Data = 0x03; */
	/* status = mos7840_set_uart_reg(port,LINE_CONTROL_REGISTER,Data); */
	/* mos7840_port->shadowLCR=Data;//Need to add later */

	Data |= SERIAL_LCR_DLAB;	/* data latch enable in LCR 0x80 */
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);

	/* Data = 0x0c; */
	/* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
	Data = 0x00;
	status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
	dbg("mos7840_write:DLL value is %x", Data);

	Data = 0x0;
	status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
	dbg("mos7840_write:DLM value is %x", Data);

	Data = Data & ~SERIAL_LCR_DLAB;
	dbg("mos7840_write: mos7840_port->shadowLCR is %x",
	    mos7840_port->shadowLCR);
	status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
#endif

	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Port Paranoia failed");
		return -1;
	}

	serial = port->serial;
	if (mos7840_serial_paranoia_check(serial, __func__)) {
		dbg("%s", "Serial Paranoia failed");
		return -1;
	}

	mos7840_port = mos7840_get_port_private(port);
	if (mos7840_port == NULL) {
		dbg("%s", "mos7840_port is NULL");
		return -1;
	}

	/* try to find a free urb in the list */
	urb
= NULL; spin_lock_irqsave(&mos7840_port->pool_lock, flags); for (i = 0; i < NUM_URBS; ++i) { if (!mos7840_port->busy[i]) { mos7840_port->busy[i] = 1; urb = mos7840_port->write_urb_pool[i]; dbg("URB:%d", i); break; } } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); if (urb == NULL) { dbg("%s - no more free urbs", __func__); goto exit; } if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); if (urb->transfer_buffer == NULL) { dev_err(&port->dev, "%s no more kernel memory...\n", __func__); goto exit; } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); memcpy(urb->transfer_buffer, current_position, transfer_size); /* fill urb with data and submit */ usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), urb->transfer_buffer, transfer_size, mos7840_bulk_out_data_callback, mos7840_port); data1 = urb->transfer_buffer; dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); /* send it down the pipe */ status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { mos7840_port->busy[i] = 0; dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed " "with status = %d\n", __func__, status); bytes_sent = status; goto exit; } bytes_sent = transfer_size; mos7840_port->icount.tx += transfer_size; smp_wmb(); dbg("mos7840_port->icount.tx is %d:", mos7840_port->icount.tx); exit: return bytes_sent; } /***************************************************************************** * mos7840_throttle * this function is called by the tty driver when it wants to stop the data * being read from the port. 
*****************************************************************************/ static void mos7840_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port; int status; if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return; } dbg("- port %d", port->number); mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return; if (!mos7840_port->open) { dbg("%s", "port not opened"); return; } dbg("%s", "Entering .........."); /* if we are implementing XON/XOFF, send the stop character */ if (I_IXOFF(tty)) { unsigned char stop_char = STOP_CHAR(tty); status = mos7840_write(tty, port, &stop_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if (tty->termios->c_cflag & CRTSCTS) { mos7840_port->shadowMCR &= ~MCR_RTS; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mos7840_port->shadowMCR); if (status < 0) return; } return; } /***************************************************************************** * mos7840_unthrottle * this function is called by the tty driver when it wants to resume * the data being read from the port (called after mos7840_throttle is * called) *****************************************************************************/ static void mos7840_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; int status; struct moschip_port *mos7840_port = mos7840_get_port_private(port); if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return; } if (mos7840_port == NULL) return; if (!mos7840_port->open) { dbg("%s - port not opened", __func__); return; } dbg("%s", "Entering .........."); /* if we are implementing XON/XOFF, send the start character */ if (I_IXOFF(tty)) { unsigned char start_char = START_CHAR(tty); status = mos7840_write(tty, port, &start_char, 1); if (status <= 0) return; } /* if we are implementing RTS/CTS, toggle that line */ if 
(tty->termios->c_cflag & CRTSCTS) { mos7840_port->shadowMCR |= MCR_RTS; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mos7840_port->shadowMCR); if (status < 0) return; } } static int mos7840_tiocmget(struct tty_struct *tty, struct file *file) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port; unsigned int result; __u16 msr; __u16 mcr; int status; mos7840_port = mos7840_get_port_private(port); dbg("%s - port %d", __func__, port->number); if (mos7840_port == NULL) return -ENODEV; status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr); status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr); result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0) | ((msr & MOS7840_MSR_CTS) ? TIOCM_CTS : 0) | ((msr & MOS7840_MSR_CD) ? TIOCM_CAR : 0) | ((msr & MOS7840_MSR_RI) ? TIOCM_RI : 0) | ((msr & MOS7840_MSR_DSR) ? TIOCM_DSR : 0); dbg("%s - 0x%04X", __func__, result); return result; } static int mos7840_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct moschip_port *mos7840_port; unsigned int mcr; int status; dbg("%s - port %d", __func__, port->number); mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return -ENODEV; /* FIXME: What locks the port registers ? 
*/ mcr = mos7840_port->shadowMCR; if (clear & TIOCM_RTS) mcr &= ~MCR_RTS; if (clear & TIOCM_DTR) mcr &= ~MCR_DTR; if (clear & TIOCM_LOOP) mcr &= ~MCR_LOOPBACK; if (set & TIOCM_RTS) mcr |= MCR_RTS; if (set & TIOCM_DTR) mcr |= MCR_DTR; if (set & TIOCM_LOOP) mcr |= MCR_LOOPBACK; mos7840_port->shadowMCR = mcr; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); if (status < 0) { dbg("setting MODEM_CONTROL_REGISTER Failed"); return status; } return 0; } /***************************************************************************** * mos7840_calc_baud_rate_divisor * this function calculates the proper baud rate divisor for the specified * baud rate. *****************************************************************************/ static int mos7840_calc_baud_rate_divisor(int baudRate, int *divisor, __u16 *clk_sel_val) { dbg("%s - %d", __func__, baudRate); if (baudRate <= 115200) { *divisor = 115200 / baudRate; *clk_sel_val = 0x0; } if ((baudRate > 115200) && (baudRate <= 230400)) { *divisor = 230400 / baudRate; *clk_sel_val = 0x10; } else if ((baudRate > 230400) && (baudRate <= 403200)) { *divisor = 403200 / baudRate; *clk_sel_val = 0x20; } else if ((baudRate > 403200) && (baudRate <= 460800)) { *divisor = 460800 / baudRate; *clk_sel_val = 0x30; } else if ((baudRate > 460800) && (baudRate <= 806400)) { *divisor = 806400 / baudRate; *clk_sel_val = 0x40; } else if ((baudRate > 806400) && (baudRate <= 921600)) { *divisor = 921600 / baudRate; *clk_sel_val = 0x50; } else if ((baudRate > 921600) && (baudRate <= 1572864)) { *divisor = 1572864 / baudRate; *clk_sel_val = 0x60; } else if ((baudRate > 1572864) && (baudRate <= 3145728)) { *divisor = 3145728 / baudRate; *clk_sel_val = 0x70; } return 0; #ifdef NOTMCS7840 for (i = 0; i < ARRAY_SIZE(mos7840_divisor_table); i++) { if (mos7840_divisor_table[i].BaudRate == baudrate) { *divisor = mos7840_divisor_table[i].Divisor; return 0; } } /* After trying for all the standard baud rates * * Try calculating the divisor for 
this baud rate */ if (baudrate > 75 && baudrate < 230400) { /* get the divisor */ custom = (__u16) (230400L / baudrate); /* Check for round off */ round1 = (__u16) (2304000L / baudrate); round = (__u16) (round1 - (custom * 10)); if (round > 4) custom++; *divisor = custom; dbg(" Baud %d = %d", baudrate, custom); return 0; } dbg("%s", " Baud calculation Failed..."); return -1; #endif } /***************************************************************************** * mos7840_send_cmd_write_baud_rate * this function sends the proper command to change the baud rate of the * specified port. *****************************************************************************/ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port, int baudRate) { int divisor = 0; int status; __u16 Data; unsigned char number; __u16 clk_sel_val; struct usb_serial_port *port; if (mos7840_port == NULL) return -1; port = (struct usb_serial_port *)mos7840_port->port; if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return -1; } if (mos7840_serial_paranoia_check(port->serial, __func__)) { dbg("%s", "Invalid Serial"); return -1; } dbg("%s", "Entering .........."); number = mos7840_port->port->number - mos7840_port->port->serial->minor; dbg("%s - port = %d, baud = %d", __func__, mos7840_port->port->number, baudRate); /* reset clk_uart_sel in spregOffset */ if (baudRate > 115200) { #ifdef HW_flow_control /* NOTE: need to see the pther register to modify */ /* setting h/w flow control bit to 1 */ Data = 0x2b; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dbg("Writing spreg failed in set_serial_baud"); return -1; } #endif } else { #ifdef HW_flow_control / *setting h/w flow control bit to 0 */ Data = 0xb; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); if (status < 0) { dbg("Writing spreg failed in set_serial_baud"); return -1; } #endif } 
if (1) { /* baudRate <= 115200) */ clk_sel_val = 0x0; Data = 0x0; status = mos7840_calc_baud_rate_divisor(baudRate, &divisor, &clk_sel_val); status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); if (status < 0) { dbg("reading spreg failed in set_serial_baud"); return -1; } Data = (Data & 0x8f) | clk_sel_val; status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); if (status < 0) { dbg("Writing spreg failed in set_serial_baud"); return -1; } /* Calculate the Divisor */ if (status) { dev_err(&port->dev, "%s - bad baud rate\n", __func__); return status; } /* Enable access to divisor latch */ Data = mos7840_port->shadowLCR | SERIAL_LCR_DLAB; mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); /* Write the divisor */ Data = (unsigned char)(divisor & 0xff); dbg("set_serial_baud Value to write DLL is %x", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data); Data = (unsigned char)((divisor & 0xff00) >> 8); dbg("set_serial_baud Value to write DLM is %x", Data); mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data); /* Disable access to divisor latch */ Data = mos7840_port->shadowLCR & ~SERIAL_LCR_DLAB; mos7840_port->shadowLCR = Data; mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); } return status; } /***************************************************************************** * mos7840_change_port_settings * This routine is called to set the UART on the device to match * the specified new settings. 
*****************************************************************************/

static void mos7840_change_port_settings(struct tty_struct *tty,
					 struct moschip_port *mos7840_port,
					 struct ktermios *old_termios)
{
	int baud;
	unsigned cflag;
	unsigned iflag;
	__u8 lData;
	__u8 lParity;
	__u8 lStop;
	int status;
	__u16 Data;
	struct usb_serial_port *port;
	struct usb_serial *serial;

	if (mos7840_port == NULL)
		return;

	port = (struct usb_serial_port *)mos7840_port->port;

	if (mos7840_port_paranoia_check(port, __func__)) {
		dbg("%s", "Invalid port");
		return;
	}

	if (mos7840_serial_paranoia_check(port->serial, __func__)) {
		dbg("%s", "Invalid Serial");
		return;
	}

	serial = port->serial;

	dbg("%s - port %d", __func__, mos7840_port->port->number);

	if (!mos7840_port->open) {
		dbg("%s - port not opened", __func__);
		return;
	}

	dbg("%s", "Entering ..........");

	lData = LCR_BITS_8;
	lStop = LCR_STOP_1;
	lParity = LCR_PAR_NONE;

	cflag = tty->termios->c_cflag;
	iflag = tty->termios->c_iflag;

	/* Change the number of bits */
	if (cflag & CSIZE) {
		switch (cflag & CSIZE) {
		case CS5:
			lData = LCR_BITS_5;
			break;

		case CS6:
			lData = LCR_BITS_6;
			break;

		case CS7:
			lData = LCR_BITS_7;
			break;
		default:
		case CS8:
			lData = LCR_BITS_8;
			break;
		}
	}
	/* Change the Parity bit */
	if (cflag & PARENB) {
		if (cflag & PARODD) {
			lParity = LCR_PAR_ODD;
			dbg("%s - parity = odd", __func__);
		} else {
			lParity = LCR_PAR_EVEN;
			dbg("%s - parity = even", __func__);
		}

	} else {
		dbg("%s - parity = none", __func__);
	}

	if (cflag & CMSPAR)
		lParity = lParity | 0x20;

	/* Change the Stop bit */
	if (cflag & CSTOPB) {
		lStop = LCR_STOP_2;
		dbg("%s - stop bits = 2", __func__);
	} else {
		lStop = LCR_STOP_1;
		dbg("%s - stop bits = 1", __func__);
	}

	/* Update the LCR with the correct value */
	mos7840_port->shadowLCR &=
	    ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
	mos7840_port->shadowLCR |= (lData | lParity | lStop);

	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x",
	    mos7840_port->shadowLCR);
	/* Disable Interrupts */
	Data = 0x00;
	mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);

	Data = 0x00;
	mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);

	Data = 0xcf;
	mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);

	/* Send the updated LCR value to the mos7840 */
	Data = mos7840_port->shadowLCR;

	mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);

	Data = 0x00b;
	mos7840_port->shadowMCR = Data;
	mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
	Data = 0x00b;
	mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);

	/* set up the MCR register and send it to the mos7840 */

	mos7840_port->shadowMCR = MCR_MASTER_IE;
	if (cflag & CBAUD)
		mos7840_port->shadowMCR |= (MCR_DTR | MCR_RTS);

	if (cflag & CRTSCTS)
		mos7840_port->shadowMCR |= (MCR_XON_ANY);
	else
		mos7840_port->shadowMCR &= ~(MCR_XON_ANY);

	Data = mos7840_port->shadowMCR;
	mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);

	/* Determine divisor based on baud rate */
	baud = tty_get_baud_rate(tty);

	if (!baud) {
		/* pick a default, any default... */
		dbg("%s", "Picked default baud...");
		baud = 9600;
	}

	dbg("%s - baud rate = %d", __func__, baud);
	status = mos7840_send_cmd_write_baud_rate(mos7840_port, baud);

	/* Enable Interrupts */
	Data = 0x0c;
	mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);

	if (mos7840_port->read_urb_busy == false) {
		mos7840_port->read_urb->dev = serial->dev;
		mos7840_port->read_urb_busy = true;
		status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
		if (status) {
			dbg("usb_submit_urb(read bulk) failed, status = %d",
			    status);
			mos7840_port->read_urb_busy = false;
		}
	}

	/*
	 * Set the condition *before* waking the waiter.  The TIOCMIWAIT
	 * handler sleeps on (delta_msr_cond == 1); with the original
	 * wake_up-then-set order a woken task could re-check the still-0
	 * condition and go back to sleep - a lost wakeup.
	 */
	mos7840_port->delta_msr_cond = 1;
	wake_up(&mos7840_port->delta_msr_wait);

	dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
	    mos7840_port->shadowLCR);

	return;
}

/*****************************************************************************
 * mos7840_set_termios
 * this function is called by the tty driver when it wants to change
 * the termios structure
*****************************************************************************/ static void mos7840_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { int status; unsigned int cflag; struct usb_serial *serial; struct moschip_port *mos7840_port; dbg("mos7840_set_termios: START"); if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return; } serial = port->serial; if (mos7840_serial_paranoia_check(serial, __func__)) { dbg("%s", "Invalid Serial"); return; } mos7840_port = mos7840_get_port_private(port); if (mos7840_port == NULL) return; if (!mos7840_port->open) { dbg("%s - port not opened", __func__); return; } dbg("%s", "setting termios - "); cflag = tty->termios->c_cflag; dbg("%s - clfag %08x iflag %08x", __func__, tty->termios->c_cflag, RELEVANT_IFLAG(tty->termios->c_iflag)); dbg("%s - old clfag %08x old iflag %08x", __func__, old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag)); dbg("%s - port %d", __func__, port->number); /* change the port settings to the new ones specified */ mos7840_change_port_settings(tty, mos7840_port, old_termios); if (!mos7840_port->read_urb) { dbg("%s", "URB KILLED !!!!!"); return; } if (mos7840_port->read_urb_busy == false) { mos7840_port->read_urb->dev = serial->dev; mos7840_port->read_urb_busy = true; status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC); if (status) { dbg("usb_submit_urb(read bulk) failed, status = %d", status); mos7840_port->read_urb_busy = false; } } return; } /***************************************************************************** * mos7840_get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. 
This functionality * allows an RS485 driver to be written in user space. *****************************************************************************/ static int mos7840_get_lsr_info(struct tty_struct *tty, unsigned int __user *value) { int count; unsigned int result = 0; count = mos7840_chars_in_buffer(tty); if (count == 0) { dbg("%s -- Empty", __func__); result = TIOCSER_TEMT; } if (copy_to_user(value, &result, sizeof(int))) return -EFAULT; return 0; } /***************************************************************************** * mos7840_get_serial_info * function to get information about serial port *****************************************************************************/ static int mos7840_get_serial_info(struct moschip_port *mos7840_port, struct serial_struct __user *retinfo) { struct serial_struct tmp; if (mos7840_port == NULL) return -1; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.type = PORT_16550A; tmp.line = mos7840_port->port->serial->minor; tmp.port = mos7840_port->port->number; tmp.irq = 0; tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ; tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE; tmp.baud_base = 9600; tmp.close_delay = 5 * HZ; tmp.closing_wait = 30 * HZ; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } /***************************************************************************** * SerialIoctl * this function handles any ioctl calls to the driver *****************************************************************************/ static int mos7840_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; void __user *argp = (void __user *)arg; struct moschip_port *mos7840_port; struct async_icount cnow; struct async_icount cprev; struct serial_icounter_struct icount; if (mos7840_port_paranoia_check(port, __func__)) { dbg("%s", "Invalid port"); return -1; } mos7840_port = mos7840_get_port_private(port); if 
(mos7840_port == NULL) return -1; dbg("%s - port %d, cmd = 0x%x", __func__, port->number, cmd); switch (cmd) { /* return number of bytes available */ case TIOCSERGETLSR: dbg("%s (%d) TIOCSERGETLSR", __func__, port->number); return mos7840_get_lsr_info(tty, argp); return 0; case TIOCGSERIAL: dbg("%s (%d) TIOCGSERIAL", __func__, port->number); return mos7840_get_serial_info(mos7840_port, argp); case TIOCSSERIAL: dbg("%s (%d) TIOCSSERIAL", __func__, port->number); break; case TIOCMIWAIT: dbg("%s (%d) TIOCMIWAIT", __func__, port->number); cprev = mos7840_port->icount; while (1) { /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ mos7840_port->delta_msr_cond = 0; wait_event_interruptible(mos7840_port->delta_msr_wait, (mos7840_port-> delta_msr_cond == 1)); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; cnow = mos7840_port->icount; smp_rmb(); if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) return -EIO; /* no change => error */ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { return 0; } cprev = cnow; } /* NOTREACHED */ break; case TIOCGICOUNT: cnow = mos7840_port->icount; smp_rmb(); memset(&icount, 0, sizeof(struct serial_icounter_struct)); icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; icount.dcd = cnow.dcd; icount.rx = cnow.rx; icount.tx = cnow.tx; icount.frame = cnow.frame; icount.overrun = cnow.overrun; icount.parity = cnow.parity; icount.brk = cnow.brk; icount.buf_overrun = cnow.buf_overrun; dbg("%s (%d) TIOCGICOUNT RX=%d, TX=%d", __func__, port->number, icount.rx, icount.tx); if (copy_to_user(argp, &icount, sizeof(icount))) return -EFAULT; return 0; default: break; } return -ENOIOCTLCMD; } static int mos7840_calc_num_ports(struct usb_serial *serial) { int mos7840_num_ports = 0; 
dbg("numberofendpoints: cur %d, alt %d", (int)serial->interface->cur_altsetting->desc.bNumEndpoints, (int)serial->interface->altsetting->desc.bNumEndpoints); if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) { mos7840_num_ports = serial->num_ports = 2; } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) { serial->num_bulk_in = 4; serial->num_bulk_out = 4; mos7840_num_ports = serial->num_ports = 4; } dbg ("mos7840_num_ports = %d", mos7840_num_ports); return mos7840_num_ports; } /**************************************************************************** * mos7840_startup ****************************************************************************/ static int mos7840_startup(struct usb_serial *serial) { struct moschip_port *mos7840_port; struct usb_device *dev; int i, status; __u16 Data; dbg("%s", "mos7840_startup :Entering.........."); if (!serial) { dbg("%s", "Invalid Handler"); return -1; } dev = serial->dev; dbg("%s", "Entering..."); dbg ("mos7840_startup: serial = %p", serial); /* we set up the pointers to the endpoints in the mos7840_open * * function, as the structures aren't created yet. */ /* set up port private structures */ for (i = 0; i < serial->num_ports; ++i) { dbg ("mos7840_startup: configuring port %d............", i); mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL); if (mos7840_port == NULL) { dev_err(&dev->dev, "%s - Out of memory\n", __func__); status = -ENOMEM; i--; /* don't follow NULL pointer cleaning up */ goto error; } /* Initialize all port interrupt end point to port 0 int * endpoint. 
Our device has only one interrupt end point * common to all port */ mos7840_port->port = serial->port[i]; mos7840_set_port_private(serial->port[i], mos7840_port); spin_lock_init(&mos7840_port->pool_lock); /* minor is not initialised until later by * usb-serial.c:get_free_serial() and cannot therefore be used * to index device instances */ mos7840_port->port_num = i + 1; dbg ("serial->port[i]->number = %d", serial->port[i]->number); dbg ("serial->port[i]->serial->minor = %d", serial->port[i]->serial->minor); dbg ("mos7840_port->port_num = %d", mos7840_port->port_num); dbg ("serial->minor = %d", serial->minor); if (mos7840_port->port_num == 1) { mos7840_port->SpRegOffset = 0x0; mos7840_port->ControlRegOffset = 0x1; mos7840_port->DcrRegOffset = 0x4; } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0x8; mos7840_port->ControlRegOffset = 0x9; mos7840_port->DcrRegOffset = 0x16; } else if ((mos7840_port->port_num == 2) && (serial->num_ports == 2)) { mos7840_port->SpRegOffset = 0xa; mos7840_port->ControlRegOffset = 0xb; mos7840_port->DcrRegOffset = 0x19; } else if ((mos7840_port->port_num == 3) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0xa; mos7840_port->ControlRegOffset = 0xb; mos7840_port->DcrRegOffset = 0x19; } else if ((mos7840_port->port_num == 4) && (serial->num_ports == 4)) { mos7840_port->SpRegOffset = 0xc; mos7840_port->ControlRegOffset = 0xd; mos7840_port->DcrRegOffset = 0x1c; } mos7840_dump_serial_port(mos7840_port); mos7840_set_port_private(serial->port[i], mos7840_port); /* enable rx_disable bit in control register */ status = mos7840_get_reg_sync(serial->port[i], mos7840_port->ControlRegOffset, &Data); if (status < 0) { dbg("Reading ControlReg failed status-0x%x", status); break; } else dbg("ControlReg Reading success val is %x, status%d", Data, status); Data |= 0x08; /* setting driver done bit */ Data |= 0x04; /* sp1_bit to have cts change reflect in modem status reg */ /* Data |= 0x20; 
//rx_disable bit */ status = mos7840_set_reg_sync(serial->port[i], mos7840_port->ControlRegOffset, Data); if (status < 0) { dbg("Writing ControlReg failed(rx_disable) status-0x%x", status); break; } else dbg("ControlReg Writing success(rx_disable) status%d", status); /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 and 0x24 in DCR3 */ Data = 0x01; status = mos7840_set_reg_sync(serial->port[i], (__u16) (mos7840_port->DcrRegOffset + 0), Data); if (status < 0) { dbg("Writing DCR0 failed status-0x%x", status); break; } else dbg("DCR0 Writing success status%d", status); Data = 0x05; status = mos7840_set_reg_sync(serial->port[i], (__u16) (mos7840_port->DcrRegOffset + 1), Data); if (status < 0) { dbg("Writing DCR1 failed status-0x%x", status); break; } else dbg("DCR1 Writing success status%d", status); Data = 0x24; status = mos7840_set_reg_sync(serial->port[i], (__u16) (mos7840_port->DcrRegOffset + 2), Data); if (status < 0) { dbg("Writing DCR2 failed status-0x%x", status); break; } else dbg("DCR2 Writing success status%d", status); /* write values in clkstart0x0 and clkmulti 0x20 */ Data = 0x0; status = mos7840_set_reg_sync(serial->port[i], CLK_START_VALUE_REGISTER, Data); if (status < 0) { dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x", status); break; } else dbg("CLK_START_VALUE_REGISTER Writing success status%d", status); Data = 0x20; status = mos7840_set_reg_sync(serial->port[i], CLK_MULTI_REGISTER, Data); if (status < 0) { dbg("Writing CLK_MULTI_REGISTER failed status-0x%x", status); goto error; } else dbg("CLK_MULTI_REGISTER Writing success status%d", status); /* write value 0x0 to scratchpad register */ Data = 0x00; status = mos7840_set_uart_reg(serial->port[i], SCRATCH_PAD_REGISTER, Data); if (status < 0) { dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x", status); break; } else dbg("SCRATCH_PAD_REGISTER Writing success status%d", status); /* Zero Length flag register */ if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) 
{ Data = 0xff; status = mos7840_set_reg_sync(serial->port[i], (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num)), Data); dbg("ZLIP offset %x", (__u16) (ZLP_REG1 + ((__u16) mos7840_port->port_num))); if (status < 0) { dbg("Writing ZLP_REG%d failed status-0x%x", i + 2, status); break; } else dbg("ZLP_REG%d Writing success status%d", i + 2, status); } else { Data = 0xff; status = mos7840_set_reg_sync(serial->port[i], (__u16) (ZLP_REG1 + ((__u16)mos7840_port->port_num) - 0x1), Data); dbg("ZLIP offset %x", (__u16) (ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1)); if (status < 0) { dbg("Writing ZLP_REG%d failed status-0x%x", i + 1, status); break; } else dbg("ZLP_REG%d Writing success status%d", i + 1, status); } mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL); mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL); mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf || !mos7840_port->dr) { status = -ENOMEM; goto error; } } dbg ("mos7840_startup: all ports configured..........."); /* Zero Length flag enable */ Data = 0x0f; status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); if (status < 0) { dbg("Writing ZLP_REG5 failed status-0x%x", status); goto error; } else dbg("ZLP_REG5 Writing success status%d", status); /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); return 0; error: for (/* nothing */; i >= 0; i--) { mos7840_port = mos7840_get_port_private(serial->port[i]); kfree(mos7840_port->dr); kfree(mos7840_port->ctrl_buf); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port); serial->port[i] = NULL; } return status; } /**************************************************************************** * mos7840_disconnect * This function is called whenever the device is removed from the usb bus. 
****************************************************************************/ static void mos7840_disconnect(struct usb_serial *serial) { int i; unsigned long flags; struct moschip_port *mos7840_port; dbg("%s", " disconnect :entering.........."); if (!serial) { dbg("%s", "Invalid Handler"); return; } /* check for the ports to be closed,close the ports and disconnect */ /* free private structure allocated for serial port * * stop reads and writes on all ports */ for (i = 0; i < serial->num_ports; ++i) { mos7840_port = mos7840_get_port_private(serial->port[i]); dbg ("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { spin_lock_irqsave(&mos7840_port->pool_lock, flags); mos7840_port->zombie = 1; spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); usb_kill_urb(mos7840_port->control_urb); } } dbg("%s", "Thank u :: "); } /**************************************************************************** * mos7840_release * This function is called when the usb_serial structure is freed. 
****************************************************************************/ static void mos7840_release(struct usb_serial *serial) { int i; struct moschip_port *mos7840_port; dbg("%s", " release :entering.........."); if (!serial) { dbg("%s", "Invalid Handler"); return; } /* check for the ports to be closed,close the ports and disconnect */ /* free private structure allocated for serial port * * stop reads and writes on all ports */ for (i = 0; i < serial->num_ports; ++i) { mos7840_port = mos7840_get_port_private(serial->port[i]); dbg("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { kfree(mos7840_port->ctrl_buf); kfree(mos7840_port->dr); kfree(mos7840_port); } } dbg("%s", "Thank u :: "); } static struct usb_driver io_driver = { .name = "mos7840", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = moschip_id_table_combined, .no_dynamic_id = 1, }; static struct usb_serial_driver moschip7840_4port_device = { .driver = { .owner = THIS_MODULE, .name = "mos7840", }, .description = DRIVER_DESC, .usb_driver = &io_driver, .id_table = moschip_port_id_table, .num_ports = 4, .open = mos7840_open, .close = mos7840_close, .write = mos7840_write, .write_room = mos7840_write_room, .chars_in_buffer = mos7840_chars_in_buffer, .throttle = mos7840_throttle, .unthrottle = mos7840_unthrottle, .calc_num_ports = mos7840_calc_num_ports, #ifdef MCSSerialProbe .probe = mos7840_serial_probe, #endif .ioctl = mos7840_ioctl, .set_termios = mos7840_set_termios, .break_ctl = mos7840_break, .tiocmget = mos7840_tiocmget, .tiocmset = mos7840_tiocmset, .attach = mos7840_startup, .disconnect = mos7840_disconnect, .release = mos7840_release, .read_bulk_callback = mos7840_bulk_in_callback, .read_int_callback = mos7840_interrupt_callback, }; /**************************************************************************** * moschip7840_init * This is called by the module subsystem, or on startup to initialize us 
****************************************************************************/ static int __init moschip7840_init(void) { int retval; dbg("%s", " mos7840_init :entering.........."); /* Register with the usb serial */ retval = usb_serial_register(&moschip7840_4port_device); if (retval) goto failed_port_device_register; dbg("%s", "Entering..."); printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); /* Register with the usb */ retval = usb_register(&io_driver); if (retval == 0) { dbg("%s", "Leaving..."); return 0; } usb_serial_deregister(&moschip7840_4port_device); failed_port_device_register: return retval; } /**************************************************************************** * moschip7840_exit * Called when the driver is about to be unloaded. ****************************************************************************/ static void __exit moschip7840_exit(void) { dbg("%s", " mos7840_exit :entering.........."); usb_deregister(&io_driver); usb_serial_deregister(&moschip7840_4port_device); dbg("%s", "Entering..."); } module_init(moschip7840_init); module_exit(moschip7840_exit); /* Module information */ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
kula85/perf-sqlite3
arch/x86/crypto/serpent_sse2_glue.c
806
16924
/* * Glue Code for SSE2 assembler versions of Serpent Cipher * * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Glue code based on aesni-intel_glue.c by: * Copyright (C) 2008, Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * CTR part based on code (crypto/ctr.c) by: * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA * */ #include <linux/module.h> #include <linux/hardirq.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> #include <crypto/ablk_helper.h> #include <crypto/algapi.h> #include <crypto/serpent.h> #include <crypto/cryptd.h> #include <crypto/b128ops.h> #include <crypto/ctr.h> #include <crypto/lrw.h> #include <crypto/xts.h> #include <asm/crypto/serpent-sse2.h> #include <asm/crypto/glue_helper.h> static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) { u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; unsigned int j; for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) ivs[j] = src[j]; serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); } static void serpent_crypt_ctr(void *ctx, u128 
*dst, const u128 *src, le128 *iv) { be128 ctrblk; le128_to_be128(&ctrblk, iv); le128_inc(iv); __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); u128_xor(dst, src, (u128 *)&ctrblk); } static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, le128 *iv) { be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; unsigned int i; for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { if (dst != src) dst[i] = src[i]; le128_to_be128(&ctrblks[i], iv); le128_inc(iv); } serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); } static const struct common_glue_ctx serpent_enc = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } }, { .num_blocks = 1, .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } } } }; static const struct common_glue_ctx serpent_ctr = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } }, { .num_blocks = 1, .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } } } }; static const struct common_glue_ctx serpent_dec = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } }, { .num_blocks = 1, .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } } } }; static const struct common_glue_ctx serpent_dec_cbc = { .num_funcs = 2, .fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS, .funcs = { { .num_blocks = SERPENT_PARALLEL_BLOCKS, .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } }, { .num_blocks = 1, .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } } } }; static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); } static int ecb_decrypt(struct blkcipher_desc 
*desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); } static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, dst, src, nbytes); } static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, nbytes); } static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); } static inline bool serpent_fpu_begin(bool fpu_enabled, unsigned int nbytes) { return glue_fpu_begin(SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS, NULL, fpu_enabled, nbytes); } static inline void serpent_fpu_end(bool fpu_enabled) { glue_fpu_end(fpu_enabled); } struct crypt_priv { struct serpent_ctx *ctx; bool fpu_enabled; }; static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) { const unsigned int bsize = SERPENT_BLOCK_SIZE; struct crypt_priv *ctx = priv; int i; ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst); return; } for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) __serpent_encrypt(ctx->ctx, srcdst, srcdst); } static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes) { const unsigned int bsize = SERPENT_BLOCK_SIZE; struct crypt_priv *ctx = priv; int i; ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); if (nbytes == bsize * SERPENT_PARALLEL_BLOCKS) { serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst); return; } for (i = 0; i < nbytes / bsize; i++, srcdst += bsize) __serpent_decrypt(ctx->ctx, srcdst, srcdst); } struct serpent_lrw_ctx { struct lrw_table_ctx 
lrw_table; struct serpent_ctx serpent_ctx; }; static int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); int err; err = __serpent_setkey(&ctx->serpent_ctx, key, keylen - SERPENT_BLOCK_SIZE); if (err) return err; return lrw_init_table(&ctx->lrw_table, key + keylen - SERPENT_BLOCK_SIZE); } static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[SERPENT_PARALLEL_BLOCKS]; struct crypt_priv crypt_ctx = { .ctx = &ctx->serpent_ctx, .fpu_enabled = false, }; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = &crypt_ctx, .crypt_fn = encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ret = lrw_crypt(desc, dst, src, nbytes, &req); serpent_fpu_end(crypt_ctx.fpu_enabled); return ret; } static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[SERPENT_PARALLEL_BLOCKS]; struct crypt_priv crypt_ctx = { .ctx = &ctx->serpent_ctx, .fpu_enabled = false, }; struct lrw_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .table_ctx = &ctx->lrw_table, .crypt_ctx = &crypt_ctx, .crypt_fn = decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ret = lrw_crypt(desc, dst, src, nbytes, &req); serpent_fpu_end(crypt_ctx.fpu_enabled); return ret; } static void lrw_exit_tfm(struct crypto_tfm *tfm) { struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_free_table(&ctx->lrw_table); } struct serpent_xts_ctx { struct serpent_ctx tweak_ctx; struct serpent_ctx crypt_ctx; }; static int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; 
int err; /* key consists of keys of equal size concatenated, therefore * the length must be even */ if (keylen % 2) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } /* first half of xts-key is for crypt */ err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); if (err) return err; /* second half of xts-key is for tweak */ return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); } static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[SERPENT_PARALLEL_BLOCKS]; struct crypt_priv crypt_ctx = { .ctx = &ctx->crypt_ctx, .fpu_enabled = false, }; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = &ctx->tweak_ctx, .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt), .crypt_ctx = &crypt_ctx, .crypt_fn = encrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ret = xts_crypt(desc, dst, src, nbytes, &req); serpent_fpu_end(crypt_ctx.fpu_enabled); return ret; } static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); be128 buf[SERPENT_PARALLEL_BLOCKS]; struct crypt_priv crypt_ctx = { .ctx = &ctx->crypt_ctx, .fpu_enabled = false, }; struct xts_crypt_req req = { .tbuf = buf, .tbuflen = sizeof(buf), .tweak_ctx = &ctx->tweak_ctx, .tweak_fn = XTS_TWEAK_CAST(__serpent_encrypt), .crypt_ctx = &crypt_ctx, .crypt_fn = decrypt_callback, }; int ret; desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ret = xts_crypt(desc, dst, src, nbytes, &req); serpent_fpu_end(crypt_ctx.fpu_enabled); return ret; } static struct crypto_alg serpent_algs[10] = { { .cra_name = "__ecb-serpent-sse2", .cra_driver_name = "__driver-ecb-serpent-sse2", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_INTERNAL, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = 
sizeof(struct serpent_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .setkey = serpent_setkey, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, }, }, { .cra_name = "__cbc-serpent-sse2", .cra_driver_name = "__driver-cbc-serpent-sse2", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_INTERNAL, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct serpent_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .setkey = serpent_setkey, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }, }, { .cra_name = "__ctr-serpent-sse2", .cra_driver_name = "__driver-ctr-serpent-sse2", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_INTERNAL, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct serpent_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = serpent_setkey, .encrypt = ctr_crypt, .decrypt = ctr_crypt, }, }, }, { .cra_name = "__lrw-serpent-sse2", .cra_driver_name = "__driver-lrw-serpent-sse2", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_INTERNAL, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct serpent_lrw_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_exit = lrw_exit_tfm, .cra_u = { .blkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE + SERPENT_BLOCK_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE + SERPENT_BLOCK_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = lrw_serpent_setkey, .encrypt = lrw_encrypt, .decrypt = lrw_decrypt, }, }, }, { .cra_name = "__xts-serpent-sse2", .cra_driver_name = 
"__driver-xts-serpent-sse2", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_INTERNAL, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct serpent_xts_ctx), .cra_alignmask = 0, .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE * 2, .max_keysize = SERPENT_MAX_KEY_SIZE * 2, .ivsize = SERPENT_BLOCK_SIZE, .setkey = xts_serpent_setkey, .encrypt = xts_encrypt, .decrypt = xts_decrypt, }, }, }, { .cra_name = "ecb(serpent)", .cra_driver_name = "ecb-serpent-sse2", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "cbc(serpent)", .cra_driver_name = "cbc-serpent-sse2", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = __ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "ctr(serpent)", .cra_driver_name = "ctr-serpent-sse2", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { 
.ablkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_encrypt, .geniv = "chainiv", }, }, }, { .cra_name = "lrw(serpent)", .cra_driver_name = "lrw-serpent-sse2", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE + SERPENT_BLOCK_SIZE, .max_keysize = SERPENT_MAX_KEY_SIZE + SERPENT_BLOCK_SIZE, .ivsize = SERPENT_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, }, { .cra_name = "xts(serpent)", .cra_driver_name = "xts-serpent-sse2", .cra_priority = 400, .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = SERPENT_BLOCK_SIZE, .cra_ctxsize = sizeof(struct async_helper_ctx), .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = ablk_init, .cra_exit = ablk_exit, .cra_u = { .ablkcipher = { .min_keysize = SERPENT_MIN_KEY_SIZE * 2, .max_keysize = SERPENT_MAX_KEY_SIZE * 2, .ivsize = SERPENT_BLOCK_SIZE, .setkey = ablk_set_key, .encrypt = ablk_encrypt, .decrypt = ablk_decrypt, }, }, } }; static int __init serpent_sse2_init(void) { if (!cpu_has_xmm2) { printk(KERN_INFO "SSE2 instructions are not detected.\n"); return -ENODEV; } return crypto_register_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); } static void __exit serpent_sse2_exit(void) { crypto_unregister_algs(serpent_algs, ARRAY_SIZE(serpent_algs)); } module_init(serpent_sse2_init); module_exit(serpent_sse2_exit); MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("serpent");
gpl-2.0
xc-racer99/blastoff_kernel_samsung_galaxys4g
net/netfilter/ipvs/ip_vs_app.c
806
13502
/* * ip_vs_app.c: Application module support for IPVS * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference * is that ip_vs_app module handles the reverse direction (incoming requests * and outgoing responses). * * IP_MASQ_APP application masquerading module * * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar> * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/tcp.h> #include <asm/system.h> #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <net/ip_vs.h> EXPORT_SYMBOL(register_ip_vs_app); EXPORT_SYMBOL(unregister_ip_vs_app); EXPORT_SYMBOL(register_ip_vs_app_inc); /* ipvs application list head */ static LIST_HEAD(ip_vs_app_list); static DEFINE_MUTEX(__ip_vs_app_mutex); /* * Get an ip_vs_app object */ static inline int ip_vs_app_get(struct ip_vs_app *app) { return try_module_get(app->module); } static inline void ip_vs_app_put(struct ip_vs_app *app) { module_put(app->module); } /* * Allocate/initialize app incarnation and register it in proto apps. 
*/ static int ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port) { struct ip_vs_protocol *pp; struct ip_vs_app *inc; int ret; if (!(pp = ip_vs_proto_get(proto))) return -EPROTONOSUPPORT; if (!pp->unregister_app) return -EOPNOTSUPP; inc = kmemdup(app, sizeof(*inc), GFP_KERNEL); if (!inc) return -ENOMEM; INIT_LIST_HEAD(&inc->p_list); INIT_LIST_HEAD(&inc->incs_list); inc->app = app; inc->port = htons(port); atomic_set(&inc->usecnt, 0); if (app->timeouts) { inc->timeout_table = ip_vs_create_timeout_table(app->timeouts, app->timeouts_size); if (!inc->timeout_table) { ret = -ENOMEM; goto out; } } ret = pp->register_app(inc); if (ret) goto out; list_add(&inc->a_list, &app->incs_list); IP_VS_DBG(9, "%s application %s:%u registered\n", pp->name, inc->name, inc->port); return 0; out: kfree(inc->timeout_table); kfree(inc); return ret; } /* * Release app incarnation */ static void ip_vs_app_inc_release(struct ip_vs_app *inc) { struct ip_vs_protocol *pp; if (!(pp = ip_vs_proto_get(inc->protocol))) return; if (pp->unregister_app) pp->unregister_app(inc); IP_VS_DBG(9, "%s App %s:%u unregistered\n", pp->name, inc->name, inc->port); list_del(&inc->a_list); kfree(inc->timeout_table); kfree(inc); } /* * Get reference to app inc (only called from softirq) * */ int ip_vs_app_inc_get(struct ip_vs_app *inc) { int result; atomic_inc(&inc->usecnt); if (unlikely((result = ip_vs_app_get(inc->app)) != 1)) atomic_dec(&inc->usecnt); return result; } /* * Put the app inc (only called from timer or net softirq) */ void ip_vs_app_inc_put(struct ip_vs_app *inc) { ip_vs_app_put(inc->app); atomic_dec(&inc->usecnt); } /* * Register an application incarnation in protocol applications */ int register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port) { int result; mutex_lock(&__ip_vs_app_mutex); result = ip_vs_app_inc_new(app, proto, port); mutex_unlock(&__ip_vs_app_mutex); return result; } /* * ip_vs_app registration routine */ int register_ip_vs_app(struct ip_vs_app *app) { 
/* increase the module use count */ ip_vs_use_count_inc(); mutex_lock(&__ip_vs_app_mutex); list_add(&app->a_list, &ip_vs_app_list); mutex_unlock(&__ip_vs_app_mutex); return 0; } /* * ip_vs_app unregistration routine * We are sure there are no app incarnations attached to services */ void unregister_ip_vs_app(struct ip_vs_app *app) { struct ip_vs_app *inc, *nxt; mutex_lock(&__ip_vs_app_mutex); list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) { ip_vs_app_inc_release(inc); } list_del(&app->a_list); mutex_unlock(&__ip_vs_app_mutex); /* decrease the module use count */ ip_vs_use_count_dec(); } /* * Bind ip_vs_conn to its ip_vs_app (called by cp constructor) */ int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp) { return pp->app_conn_bind(cp); } /* * Unbind cp from application incarnation (called by cp destructor) */ void ip_vs_unbind_app(struct ip_vs_conn *cp) { struct ip_vs_app *inc = cp->app; if (!inc) return; if (inc->unbind_conn) inc->unbind_conn(inc, cp); if (inc->done_conn) inc->done_conn(inc, cp); ip_vs_app_inc_put(inc); cp->app = NULL; } /* * Fixes th->seq based on ip_vs_seq info. */ static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) { __u32 seq = ntohl(th->seq); /* * Adjust seq with delta-offset for all packets after * the most recent resized pkt seq and with previous_delta offset * for all packets before most recent resized pkt seq. */ if (vseq->delta || vseq->previous_delta) { if(after(seq, vseq->init_seq)) { th->seq = htonl(seq + vseq->delta); IP_VS_DBG(9, "%s(): added delta (%d) to seq\n", __func__, vseq->delta); } else { th->seq = htonl(seq + vseq->previous_delta); IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n", __func__, vseq->previous_delta); } } } /* * Fixes th->ack_seq based on ip_vs_seq info. 
*/ static inline void vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th) { __u32 ack_seq = ntohl(th->ack_seq); /* * Adjust ack_seq with delta-offset for * the packets AFTER most recent resized pkt has caused a shift * for packets before most recent resized pkt, use previous_delta */ if (vseq->delta || vseq->previous_delta) { /* since ack_seq is the number of octet that is expected to receive next, so compare it with init_seq+delta */ if(after(ack_seq, vseq->init_seq+vseq->delta)) { th->ack_seq = htonl(ack_seq - vseq->delta); IP_VS_DBG(9, "%s(): subtracted delta " "(%d) from ack_seq\n", __func__, vseq->delta); } else { th->ack_seq = htonl(ack_seq - vseq->previous_delta); IP_VS_DBG(9, "%s(): subtracted " "previous_delta (%d) from ack_seq\n", __func__, vseq->previous_delta); } } } /* * Updates ip_vs_seq if pkt has been resized * Assumes already checked proto==IPPROTO_TCP and diff!=0. */ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, unsigned flag, __u32 seq, int diff) { /* spinlock is to keep updating cp->flags atomic */ spin_lock(&cp->lock); if (!(cp->flags & flag) || after(seq, vseq->init_seq)) { vseq->previous_delta = vseq->delta; vseq->delta += diff; vseq->init_seq = seq; cp->flags |= flag; } spin_unlock(&cp->lock); } static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb, struct ip_vs_app *app) { int diff; const unsigned int tcp_offset = ip_hdrlen(skb); struct tcphdr *th; __u32 seq; if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) return 0; th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); /* * Remember seq number in case this pkt gets resized */ seq = ntohl(th->seq); /* * Fix seq stuff if flagged as so. 
*/ if (cp->flags & IP_VS_CONN_F_OUT_SEQ) vs_fix_seq(&cp->out_seq, th); if (cp->flags & IP_VS_CONN_F_IN_SEQ) vs_fix_ack_seq(&cp->in_seq, th); /* * Call private output hook function */ if (app->pkt_out == NULL) return 1; if (!app->pkt_out(app, cp, skb, &diff)) return 0; /* * Update ip_vs seq stuff if len has changed. */ if (diff != 0) vs_seq_update(cp, &cp->out_seq, IP_VS_CONN_F_OUT_SEQ, seq, diff); return 1; } /* * Output pkt hook. Will call bound ip_vs_app specific function * called by ipvs packet handler, assumes previously checked cp!=NULL * returns false if it can't handle packet (oom) */ int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_app *app; /* * check if application module is bound to * this ip_vs_conn. */ if ((app = cp->app) == NULL) return 1; /* TCP is complicated */ if (cp->protocol == IPPROTO_TCP) return app_tcp_pkt_out(cp, skb, app); /* * Call private output hook function */ if (app->pkt_out == NULL) return 1; return app->pkt_out(app, cp, skb, NULL); } static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb, struct ip_vs_app *app) { int diff; const unsigned int tcp_offset = ip_hdrlen(skb); struct tcphdr *th; __u32 seq; if (!skb_make_writable(skb, tcp_offset + sizeof(*th))) return 0; th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset); /* * Remember seq number in case this pkt gets resized */ seq = ntohl(th->seq); /* * Fix seq stuff if flagged as so. */ if (cp->flags & IP_VS_CONN_F_IN_SEQ) vs_fix_seq(&cp->in_seq, th); if (cp->flags & IP_VS_CONN_F_OUT_SEQ) vs_fix_ack_seq(&cp->out_seq, th); /* * Call private input hook function */ if (app->pkt_in == NULL) return 1; if (!app->pkt_in(app, cp, skb, &diff)) return 0; /* * Update ip_vs seq stuff if len has changed. */ if (diff != 0) vs_seq_update(cp, &cp->in_seq, IP_VS_CONN_F_IN_SEQ, seq, diff); return 1; } /* * Input pkt hook. Will call bound ip_vs_app specific function * called by ipvs packet handler, assumes previously checked cp!=NULL. 
* returns false if can't handle packet (oom). */ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb) { struct ip_vs_app *app; /* * check if application module is bound to * this ip_vs_conn. */ if ((app = cp->app) == NULL) return 1; /* TCP is complicated */ if (cp->protocol == IPPROTO_TCP) return app_tcp_pkt_in(cp, skb, app); /* * Call private input hook function */ if (app->pkt_in == NULL) return 1; return app->pkt_in(app, cp, skb, NULL); } #ifdef CONFIG_PROC_FS /* * /proc/net/ip_vs_app entry function */ static struct ip_vs_app *ip_vs_app_idx(loff_t pos) { struct ip_vs_app *app, *inc; list_for_each_entry(app, &ip_vs_app_list, a_list) { list_for_each_entry(inc, &app->incs_list, a_list) { if (pos-- == 0) return inc; } } return NULL; } static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&__ip_vs_app_mutex); return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip_vs_app *inc, *app; struct list_head *e; ++*pos; if (v == SEQ_START_TOKEN) return ip_vs_app_idx(0); inc = v; app = inc->app; if ((e = inc->a_list.next) != &app->incs_list) return list_entry(e, struct ip_vs_app, a_list); /* go on to next application */ for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) { app = list_entry(e, struct ip_vs_app, a_list); list_for_each_entry(inc, &app->incs_list, a_list) { return inc; } } return NULL; } static void ip_vs_app_seq_stop(struct seq_file *seq, void *v) { mutex_unlock(&__ip_vs_app_mutex); } static int ip_vs_app_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "prot port usecnt name\n"); else { const struct ip_vs_app *inc = v; seq_printf(seq, "%-3s %-7u %-6d %-17s\n", ip_vs_proto_name(inc->protocol), ntohs(inc->port), atomic_read(&inc->usecnt), inc->name); } return 0; } static const struct seq_operations ip_vs_app_seq_ops = { .start = ip_vs_app_seq_start, .next = ip_vs_app_seq_next, .stop = 
ip_vs_app_seq_stop, .show = ip_vs_app_seq_show, }; static int ip_vs_app_open(struct inode *inode, struct file *file) { return seq_open(file, &ip_vs_app_seq_ops); } static const struct file_operations ip_vs_app_fops = { .owner = THIS_MODULE, .open = ip_vs_app_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* * Replace a segment of data with a new segment */ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len) { int diff; int o_offset; int o_left; EnterFunction(9); diff = n_len - o_len; o_offset = o_buf - (char *)skb->data; /* The length of left data after o_buf+o_len in the skb data */ o_left = skb->len - (o_offset + o_len); if (diff <= 0) { memmove(o_buf + n_len, o_buf + o_len, o_left); memcpy(o_buf, n_buf, n_len); skb_trim(skb, skb->len + diff); } else if (diff <= skb_tailroom(skb)) { skb_put(skb, diff); memmove(o_buf + n_len, o_buf + o_len, o_left); memcpy(o_buf, n_buf, n_len); } else { if (pskb_expand_head(skb, skb_headroom(skb), diff, pri)) return -ENOMEM; skb_put(skb, diff); memmove(skb->data + o_offset + n_len, skb->data + o_offset + o_len, o_left); skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len); } /* must update the iph total length here */ ip_hdr(skb)->tot_len = htons(skb->len); LeaveFunction(9); return 0; } int __init ip_vs_app_init(void) { /* we will replace it with proc_net_ipvs_create() soon */ proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); return 0; } void ip_vs_app_cleanup(void) { proc_net_remove(&init_net, "ip_vs_app"); }
gpl-2.0
lnfamous/Kernel_CyanogenMod11_Pico
drivers/gpu/ion/ion_heap.c
1318
2433
/* * drivers/gpu/ion/ion_heap.c * * Copyright (C) 2011 Google, Inc. * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/ion.h> #include "ion_priv.h" #include <linux/msm_ion.h> struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) { struct ion_heap *heap = NULL; switch ((int) heap_data->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: heap = ion_system_contig_heap_create(heap_data); break; case ION_HEAP_TYPE_SYSTEM: heap = ion_system_heap_create(heap_data); break; case ION_HEAP_TYPE_CARVEOUT: heap = ion_carveout_heap_create(heap_data); break; case ION_HEAP_TYPE_IOMMU: heap = ion_iommu_heap_create(heap_data); break; case ION_HEAP_TYPE_CP: heap = ion_cp_heap_create(heap_data); break; #ifdef CONFIG_CMA case ION_HEAP_TYPE_DMA: heap = ion_cma_heap_create(heap_data); break; #endif default: pr_err("%s: Invalid heap type %d\n", __func__, heap_data->type); return ERR_PTR(-EINVAL); } if (IS_ERR_OR_NULL(heap)) { pr_err("%s: error creating heap %s type %d base %lu size %u\n", __func__, heap_data->name, heap_data->type, heap_data->base, heap_data->size); return ERR_PTR(-EINVAL); } heap->name = heap_data->name; heap->id = heap_data->id; heap->priv = heap_data->priv; return heap; } void ion_heap_destroy(struct ion_heap *heap) { if (!heap) return; switch ((int) heap->type) { case ION_HEAP_TYPE_SYSTEM_CONTIG: ion_system_contig_heap_destroy(heap); break; case ION_HEAP_TYPE_SYSTEM: ion_system_heap_destroy(heap); break; case ION_HEAP_TYPE_CARVEOUT: 
ion_carveout_heap_destroy(heap); break; case ION_HEAP_TYPE_IOMMU: ion_iommu_heap_destroy(heap); break; case ION_HEAP_TYPE_CP: ion_cp_heap_destroy(heap); break; #ifdef CONFIG_CMA case ION_HEAP_TYPE_DMA: ion_cma_heap_destroy(heap); break; #endif default: pr_err("%s: Invalid heap type %d\n", __func__, heap->type); } }
gpl-2.0
MrStaticVoid/kernel_m8
drivers/remoteproc/remoteproc_core.c
1830
44473
/* * Remote Processor Framework * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. * * Ohad Ben-Cohen <ohad@wizery.com> * Brian Swetland <swetland@google.com> * Mark Grosen <mgrosen@ti.com> * Fernando Guzman Lugo <fernando.lugo@ti.com> * Suman Anna <s-anna@ti.com> * Robert Tivy <rtivy@ti.com> * Armando Uribe De Leon <x0095078@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> #include <linux/string.h> #include <linux/debugfs.h> #include <linux/remoteproc.h> #include <linux/iommu.h> #include <linux/klist.h> #include <linux/elf.h> #include <linux/virtio_ids.h> #include <linux/virtio_ring.h> #include <asm/byteorder.h> #include "remoteproc_internal.h" static void klist_rproc_get(struct klist_node *n); static void klist_rproc_put(struct klist_node *n); /* * klist of the available remote processors. * * We need this in order to support name-based lookups (needed by the * rproc_get_by_name()). * * That said, we don't use rproc_get_by_name() at this point. * The use cases that do require its existence should be * scrutinized, and hopefully migrated to rproc_boot() using device-based * binding. * * If/when this materializes, we could drop the klist (and the by_name * API). 
*/ static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put); typedef int (*rproc_handle_resources_t)(struct rproc *rproc, struct resource_table *table, int len); typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail); /* * This is the IOMMU fault handler we register with the IOMMU API * (when relevant; not all remote processors access memory through * an IOMMU). * * IOMMU core will invoke this handler whenever the remote processor * will try to access an unmapped device address. * * Currently this is mostly a stub, but it will be later used to trigger * the recovery of the remote processor. */ static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *token) { dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); /* * Let the iommu core know we're not really handling this fault; * we just plan to use this as a recovery trigger. */ return -ENOSYS; } static int rproc_enable_iommu(struct rproc *rproc) { struct iommu_domain *domain; struct device *dev = rproc->dev; int ret; /* * We currently use iommu_present() to decide if an IOMMU * setup is needed. * * This works for simple cases, but will easily fail with * platforms that do have an IOMMU, but not for this specific * rproc. * * This will be easily solved by introducing hw capabilities * that will be set by the remoteproc driver. 
*/ if (!iommu_present(dev->bus)) { dev_dbg(dev, "iommu not found\n"); return 0; } domain = iommu_domain_alloc(dev->bus); if (!domain) { dev_err(dev, "can't alloc iommu domain\n"); return -ENOMEM; } iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); ret = iommu_attach_device(domain, dev); if (ret) { dev_err(dev, "can't attach iommu device: %d\n", ret); goto free_domain; } rproc->domain = domain; return 0; free_domain: iommu_domain_free(domain); return ret; } static void rproc_disable_iommu(struct rproc *rproc) { struct iommu_domain *domain = rproc->domain; struct device *dev = rproc->dev; if (!domain) return; iommu_detach_device(domain, dev); iommu_domain_free(domain); return; } /* * Some remote processors will ask us to allocate them physically contiguous * memory regions (which we call "carveouts"), and map them to specific * device addresses (which are hardcoded in the firmware). * * They may then ask us to copy objects into specific device addresses (e.g. * code/data sections) or expose us certain symbols in other device address * (e.g. their trace buffer). * * This function is an internal helper with which we can go over the allocated * carveouts and translate specific device address to kernel virtual addresses * so we can access the referenced memory. * * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, * but only on kernel direct mapped RAM memory. Instead, we're just using * here the output of the DMA API, which should be more correct. 
*/ static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) { struct rproc_mem_entry *carveout; void *ptr = NULL; list_for_each_entry(carveout, &rproc->carveouts, node) { int offset = da - carveout->da; /* try next carveout if da is too small */ if (offset < 0) continue; /* try next carveout if da is too large */ if (offset + len > carveout->len) continue; ptr = carveout->va + offset; break; } return ptr; } /** * rproc_load_segments() - load firmware segments to memory * @rproc: remote processor which will be booted using these fw segments * @elf_data: the content of the ELF firmware image * @len: firmware size (in bytes) * * This function loads the firmware segments to memory, where the remote * processor expects them. * * Some remote processors will expect their code and data to be placed * in specific device addresses, and can't have them dynamically assigned. * * We currently support only those kind of remote processors, and expect * the program header's paddr member to contain those addresses. We then go * through the physically contiguous "carveout" memory regions which we * allocated (and mapped) earlier on behalf of the remote processor, * and "translate" device address to kernel addresses, so we can copy the * segments where they are expected. * * Currently we only support remote processors that required carveout * allocations and got them mapped onto their iommus. Some processors * might be different: they might not have iommus, and would prefer to * directly allocate memory for every segment/resource. This is not yet * supported, though. 
*/ static int rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) { struct device *dev = rproc->dev; struct elf32_hdr *ehdr; struct elf32_phdr *phdr; int i, ret = 0; ehdr = (struct elf32_hdr *)elf_data; phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); /* go through the available ELF segments */ for (i = 0; i < ehdr->e_phnum; i++, phdr++) { u32 da = phdr->p_paddr; u32 memsz = phdr->p_memsz; u32 filesz = phdr->p_filesz; u32 offset = phdr->p_offset; void *ptr; if (phdr->p_type != PT_LOAD) continue; dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", phdr->p_type, da, memsz, filesz); if (filesz > memsz) { dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", filesz, memsz); ret = -EINVAL; break; } if (offset + filesz > len) { dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", offset + filesz, len); ret = -EINVAL; break; } /* grab the kernel address for this device address */ ptr = rproc_da_to_va(rproc, da, memsz); if (!ptr) { dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); ret = -EINVAL; break; } /* put the segment where the remote processor expects it */ if (phdr->p_filesz) memcpy(ptr, elf_data + phdr->p_offset, filesz); /* * Zero out remaining memory for this segment. * * This isn't strictly required since dma_alloc_coherent already * did this for us. albeit harmless, we may consider removing * this. 
*/ if (memsz > filesz) memset(ptr + filesz, 0, memsz - filesz); } return ret; } static int __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i) { struct rproc *rproc = rvdev->rproc; struct device *dev = rproc->dev; struct fw_rsc_vdev_vring *vring = &rsc->vring[i]; dma_addr_t dma; void *va; int ret, size, notifyid; dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n", i, vring->da, vring->num, vring->align); /* make sure reserved bytes are zeroes */ if (vring->reserved) { dev_err(dev, "vring rsc has non zero reserved bytes\n"); return -EINVAL; } /* verify queue size and vring alignment are sane */ if (!vring->num || !vring->align) { dev_err(dev, "invalid qsz (%d) or alignment (%d)\n", vring->num, vring->align); return -EINVAL; } /* actual size of vring (in bytes) */ size = PAGE_ALIGN(vring_size(vring->num, vring->align)); if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) { dev_err(dev, "idr_pre_get failed\n"); return -ENOMEM; } /* * Allocate non-cacheable memory for the vring. 
In the future * this call will also configure the IOMMU for us */ va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL); if (!va) { dev_err(dev, "dma_alloc_coherent failed\n"); return -EINVAL; } /* assign an rproc-wide unique index for this vring */ /* TODO: assign a notifyid for rvdev updates as well */ ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], &notifyid); if (ret) { dev_err(dev, "idr_get_new failed: %d\n", ret); dma_free_coherent(dev, size, va, dma); return ret; } /* let the rproc know the da and notifyid of this vring */ /* TODO: expose this to remote processor */ vring->da = dma; vring->notifyid = notifyid; dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va, dma, size, notifyid); rvdev->vring[i].len = vring->num; rvdev->vring[i].align = vring->align; rvdev->vring[i].va = va; rvdev->vring[i].dma = dma; rvdev->vring[i].notifyid = notifyid; rvdev->vring[i].rvdev = rvdev; return 0; } static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i) { struct rproc *rproc = rvdev->rproc; for (i--; i >= 0; i--) { struct rproc_vring *rvring = &rvdev->vring[i]; int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma); idr_remove(&rproc->notifyids, rvring->notifyid); } } /** * rproc_handle_vdev() - handle a vdev fw resource * @rproc: the remote processor * @rsc: the vring resource descriptor * @avail: size of available data (for sanity checking the image) * * This resource entry requests the host to statically register a virtio * device (vdev), and setup everything needed to support it. It contains * everything needed to make it possible: the virtio device id, virtio * device features, vrings information, virtio config space, etc... * * Before registering the vdev, the vrings are allocated from non-cacheable * physically contiguous memory. Currently we only support two vrings per * remote processor (temporary limitation). 
We might also want to consider * doing the vring allocation only later when ->find_vqs() is invoked, and * then release them upon ->del_vqs(). * * Note: @da is currently not really handled correctly: we dynamically * allocate it using the DMA API, ignoring requested hard coded addresses, * and we don't take care of any required IOMMU programming. This is all * going to be taken care of when the generic iommu-based DMA API will be * merged. Meanwhile, statically-addressed iommu-based firmware images should * use RSC_DEVMEM resource entries to map their required @da to the physical * address of their base CMA region (ouch, hacky!). * * Returns 0 on success, or an appropriate error code otherwise */ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, int avail) { struct device *dev = rproc->dev; struct rproc_vdev *rvdev; int i, ret; /* make sure resource isn't truncated */ if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) + rsc->config_len > avail) { dev_err(rproc->dev, "vdev rsc is truncated\n"); return -EINVAL; } /* make sure reserved bytes are zeroes */ if (rsc->reserved[0] || rsc->reserved[1]) { dev_err(dev, "vdev rsc has non zero reserved bytes\n"); return -EINVAL; } dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n", rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); /* we currently support only two vrings per rvdev */ if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); return -EINVAL; } rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL); if (!rvdev) return -ENOMEM; rvdev->rproc = rproc; /* allocate the vrings */ for (i = 0; i < rsc->num_of_vrings; i++) { ret = __rproc_handle_vring(rvdev, rsc, i); if (ret) goto free_vrings; } /* remember the device features */ rvdev->dfeatures = rsc->dfeatures; list_add_tail(&rvdev->node, &rproc->rvdevs); /* it is now safe to add the virtio device */ ret = rproc_add_virtio_dev(rvdev, 
rsc->id); if (ret) goto free_vrings; return 0; free_vrings: __rproc_free_vrings(rvdev, i); kfree(rvdev); return ret; } /** * rproc_handle_trace() - handle a shared trace buffer resource * @rproc: the remote processor * @rsc: the trace resource descriptor * @avail: size of available data (for sanity checking the image) * * In case the remote processor dumps trace logs into memory, * export it via debugfs. * * Currently, the 'da' member of @rsc should contain the device address * where the remote processor is dumping the traces. Later we could also * support dynamically allocating this address using the generic * DMA API (but currently there isn't a use case for that). * * Returns 0 on success, or an appropriate error code otherwise */ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, int avail) { struct rproc_mem_entry *trace; struct device *dev = rproc->dev; void *ptr; char name[15]; if (sizeof(*rsc) > avail) { dev_err(rproc->dev, "trace rsc is truncated\n"); return -EINVAL; } /* make sure reserved bytes are zeroes */ if (rsc->reserved) { dev_err(dev, "trace rsc has non zero reserved bytes\n"); return -EINVAL; } /* what's the kernel address of this resource ? 
*/ ptr = rproc_da_to_va(rproc, rsc->da, rsc->len); if (!ptr) { dev_err(dev, "erroneous trace resource entry\n"); return -EINVAL; } trace = kzalloc(sizeof(*trace), GFP_KERNEL); if (!trace) { dev_err(dev, "kzalloc trace failed\n"); return -ENOMEM; } /* set the trace buffer dma properties */ trace->len = rsc->len; trace->va = ptr; /* make sure snprintf always null terminates, even if truncating */ snprintf(name, sizeof(name), "trace%d", rproc->num_traces); /* create the debugfs entry */ trace->priv = rproc_create_trace_file(name, rproc, trace); if (!trace->priv) { trace->va = NULL; kfree(trace); return -EINVAL; } list_add_tail(&trace->node, &rproc->traces); rproc->num_traces++; dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr, rsc->da, rsc->len); return 0; } /** * rproc_handle_devmem() - handle devmem resource entry * @rproc: remote processor handle * @rsc: the devmem resource entry * @avail: size of available data (for sanity checking the image) * * Remote processors commonly need to access certain on-chip peripherals. * * Some of these remote processors access memory via an iommu device, * and might require us to configure their iommu before they can access * the on-chip peripherals they need. * * This resource entry is a request to map such a peripheral device. * * These devmem entries will contain the physical address of the device in * the 'pa' member. If a specific device address is expected, then 'da' will * contain it (currently this is the only use case supported). 'len' will * contain the size of the physical region we need to map. * * Currently we just "trust" those devmem entries to contain valid physical * addresses, but this is going to change: we want the implementations to * tell us ranges of physical addresses the firmware is allowed to request, * and not allow firmwares to request access to physical addresses that * are outside those ranges. 
*/ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, int avail) { struct rproc_mem_entry *mapping; int ret; /* no point in handling this resource without a valid iommu domain */ if (!rproc->domain) return -EINVAL; if (sizeof(*rsc) > avail) { dev_err(rproc->dev, "devmem rsc is truncated\n"); return -EINVAL; } /* make sure reserved bytes are zeroes */ if (rsc->reserved) { dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n"); return -EINVAL; } mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) { dev_err(rproc->dev, "kzalloc mapping failed\n"); return -ENOMEM; } ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags); if (ret) { dev_err(rproc->dev, "failed to map devmem: %d\n", ret); goto out; } /* * We'll need this info later when we'll want to unmap everything * (e.g. on shutdown). * * We can't trust the remote processor not to change the resource * table, so we must maintain this info independently. */ mapping->da = rsc->da; mapping->len = rsc->len; list_add_tail(&mapping->node, &rproc->mappings); dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n", rsc->pa, rsc->da, rsc->len); return 0; out: kfree(mapping); return ret; } /** * rproc_handle_carveout() - handle phys contig memory allocation requests * @rproc: rproc handle * @rsc: the resource entry * @avail: size of available data (for image validation) * * This function will handle firmware requests for allocation of physically * contiguous memory regions. * * These request entries should come first in the firmware's resource table, * as other firmware entries might request placing other data objects inside * these memory regions (e.g. data/code segments, trace resource entries, ...). * * Allocating memory this way helps utilizing the reserved physical memory * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries * needed to map it (in case @rproc is using an IOMMU). 
Reducing the TLB * pressure is important; it may have a substantial impact on performance. */ static int rproc_handle_carveout(struct rproc *rproc, struct fw_rsc_carveout *rsc, int avail) { struct rproc_mem_entry *carveout, *mapping; struct device *dev = rproc->dev; dma_addr_t dma; void *va; int ret; if (sizeof(*rsc) > avail) { dev_err(rproc->dev, "carveout rsc is truncated\n"); return -EINVAL; } /* make sure reserved bytes are zeroes */ if (rsc->reserved) { dev_err(dev, "carveout rsc has non zero reserved bytes\n"); return -EINVAL; } dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n", rsc->da, rsc->pa, rsc->len, rsc->flags); mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) { dev_err(dev, "kzalloc mapping failed\n"); return -ENOMEM; } carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); if (!carveout) { dev_err(dev, "kzalloc carveout failed\n"); ret = -ENOMEM; goto free_mapping; } va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL); if (!va) { dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len); ret = -ENOMEM; goto free_carv; } dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len); /* * Ok, this is non-standard. * * Sometimes we can't rely on the generic iommu-based DMA API * to dynamically allocate the device address and then set the IOMMU * tables accordingly, because some remote processors might * _require_ us to use hard coded device addresses that their * firmware was compiled with. * * In this case, we must use the IOMMU API directly and map * the memory to the device address as expected by the remote * processor. * * Obviously such remote processor devices should not be configured * to use the iommu-based DMA API: we expect 'dma' to contain the * physical address in this case. 
*/ if (rproc->domain) { ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, rsc->flags); if (ret) { dev_err(dev, "iommu_map failed: %d\n", ret); goto dma_free; } /* * We'll need this info later when we'll want to unmap * everything (e.g. on shutdown). * * We can't trust the remote processor not to change the * resource table, so we must maintain this info independently. */ mapping->da = rsc->da; mapping->len = rsc->len; list_add_tail(&mapping->node, &rproc->mappings); dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma); /* * Some remote processors might need to know the pa * even though they are behind an IOMMU. E.g., OMAP4's * remote M3 processor needs this so it can control * on-chip hardware accelerators that are not behind * the IOMMU, and therefor must know the pa. * * Generally we don't want to expose physical addresses * if we don't have to (remote processors are generally * _not_ trusted), so we might want to do this only for * remote processor that _must_ have this (e.g. OMAP4's * dual M3 subsystem). */ rsc->pa = dma; } carveout->va = va; carveout->len = rsc->len; carveout->dma = dma; carveout->da = rsc->da; list_add_tail(&carveout->node, &rproc->carveouts); return 0; dma_free: dma_free_coherent(dev, rsc->len, va, dma); free_carv: kfree(carveout); free_mapping: kfree(mapping); return ret; } /* * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. 
*/ static rproc_handle_resource_t rproc_handle_rsc[] = { [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, [RSC_VDEV] = NULL, /* VDEVs were handled upon registrarion */ }; /* handle firmware resource entries before booting the remote processor */ static int rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len) { struct device *dev = rproc->dev; rproc_handle_resource_t handler; int ret = 0, i; for (i = 0; i < table->num; i++) { int offset = table->offset[i]; struct fw_rsc_hdr *hdr = (void *)table + offset; int avail = len - offset - sizeof(*hdr); void *rsc = (void *)hdr + sizeof(*hdr); /* make sure table isn't truncated */ if (avail < 0) { dev_err(dev, "rsc table is truncated\n"); return -EINVAL; } dev_dbg(dev, "rsc: type %d\n", hdr->type); if (hdr->type >= RSC_LAST) { dev_warn(dev, "unsupported resource %d\n", hdr->type); continue; } handler = rproc_handle_rsc[hdr->type]; if (!handler) continue; ret = handler(rproc, rsc, avail); if (ret) break; } return ret; } /* handle firmware resource entries while registering the remote processor */ static int rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len) { struct device *dev = rproc->dev; int ret = 0, i; for (i = 0; i < table->num; i++) { int offset = table->offset[i]; struct fw_rsc_hdr *hdr = (void *)table + offset; int avail = len - offset - sizeof(*hdr); struct fw_rsc_vdev *vrsc; /* make sure table isn't truncated */ if (avail < 0) { dev_err(dev, "rsc table is truncated\n"); return -EINVAL; } dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type); if (hdr->type != RSC_VDEV) continue; vrsc = (struct fw_rsc_vdev *)hdr->data; ret = rproc_handle_vdev(rproc, vrsc, avail); if (ret) break; } return ret; } /** * rproc_find_rsc_table() - find the resource table * @rproc: the rproc handle * @elf_data: the content of the ELF firmware 
image * @len: firmware size (in bytes) * @tablesz: place holder for providing back the table size * * This function finds the resource table inside the remote processor's * firmware. It is used both upon the registration of @rproc (in order * to look for and register the supported virito devices), and when the * @rproc is booted. * * Returns the pointer to the resource table if it is found, and write its * size into @tablesz. If a valid table isn't found, NULL is returned * (and @tablesz isn't set). */ static struct resource_table * rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len, int *tablesz) { struct elf32_hdr *ehdr; struct elf32_shdr *shdr; const char *name_table; struct device *dev = rproc->dev; struct resource_table *table = NULL; int i; ehdr = (struct elf32_hdr *)elf_data; shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff); name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset; /* look for the resource table and handle it */ for (i = 0; i < ehdr->e_shnum; i++, shdr++) { int size = shdr->sh_size; int offset = shdr->sh_offset; if (strcmp(name_table + shdr->sh_name, ".resource_table")) continue; table = (struct resource_table *)(elf_data + offset); /* make sure we have the entire table */ if (offset + size > len) { dev_err(dev, "resource table truncated\n"); return NULL; } /* make sure table has at least the header */ if (sizeof(struct resource_table) > size) { dev_err(dev, "header-less resource table\n"); return NULL; } /* we don't support any version beyond the first */ if (table->ver != 1) { dev_err(dev, "unsupported fw ver: %d\n", table->ver); return NULL; } /* make sure reserved bytes are zeroes */ if (table->reserved[0] || table->reserved[1]) { dev_err(dev, "non zero reserved bytes\n"); return NULL; } /* make sure the offsets array isn't truncated */ if (table->num * sizeof(table->offset[0]) + sizeof(struct resource_table) > size) { dev_err(dev, "resource table incomplete\n"); return NULL; } *tablesz = shdr->sh_size; break; 
} return table; } /** * rproc_resource_cleanup() - clean up and free all acquired resources * @rproc: rproc handle * * This function will free all resources acquired for @rproc, and it * is called whenever @rproc either shuts down or fails to boot. */ static void rproc_resource_cleanup(struct rproc *rproc) { struct rproc_mem_entry *entry, *tmp; struct device *dev = rproc->dev; /* clean up debugfs trace entries */ list_for_each_entry_safe(entry, tmp, &rproc->traces, node) { rproc_remove_trace_file(entry->priv); rproc->num_traces--; list_del(&entry->node); kfree(entry); } /* clean up carveout allocations */ list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { dma_free_coherent(dev, entry->len, entry->va, entry->dma); list_del(&entry->node); kfree(entry); } /* clean up iommu mapping entries */ list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) { size_t unmapped; unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); if (unmapped != entry->len) { /* nothing much to do besides complaining */ dev_err(dev, "failed to unmap %u/%u\n", entry->len, unmapped); } list_del(&entry->node); kfree(entry); } } /* make sure this fw image is sane */ static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) { const char *name = rproc->firmware; struct device *dev = rproc->dev; struct elf32_hdr *ehdr; char class; if (!fw) { dev_err(dev, "failed to load %s\n", name); return -EINVAL; } if (fw->size < sizeof(struct elf32_hdr)) { dev_err(dev, "Image is too small\n"); return -EINVAL; } ehdr = (struct elf32_hdr *)fw->data; /* We only support ELF32 at this point */ class = ehdr->e_ident[EI_CLASS]; if (class != ELFCLASS32) { dev_err(dev, "Unsupported class: %d\n", class); return -EINVAL; } /* We assume the firmware has the same endianess as the host */ # ifdef __LITTLE_ENDIAN if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { # else /* BIG ENDIAN */ if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { # endif dev_err(dev, "Unsupported firmware endianess\n"); 
return -EINVAL; } if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) { dev_err(dev, "Image is too small\n"); return -EINVAL; } if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { dev_err(dev, "Image is corrupted (bad magic)\n"); return -EINVAL; } if (ehdr->e_phnum == 0) { dev_err(dev, "No loadable segments\n"); return -EINVAL; } if (ehdr->e_phoff > fw->size) { dev_err(dev, "Firmware size is too small\n"); return -EINVAL; } return 0; } /* * take a firmware and boot a remote processor with it. */ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) { struct device *dev = rproc->dev; const char *name = rproc->firmware; struct elf32_hdr *ehdr; struct resource_table *table; int ret, tablesz; ret = rproc_fw_sanity_check(rproc, fw); if (ret) return ret; ehdr = (struct elf32_hdr *)fw->data; dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); /* * if enabling an IOMMU isn't relevant for this rproc, this is * just a nop */ ret = rproc_enable_iommu(rproc); if (ret) { dev_err(dev, "can't enable iommu: %d\n", ret); return ret; } /* * The ELF entry point is the rproc's boot addr (though this is not * a configurable property of all remote processors: some will always * boot at a specific hardcoded address). 
*/ rproc->bootaddr = ehdr->e_entry; /* look for the resource table */ table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); if (!table) goto clean_up; /* handle fw resources which are required to boot rproc */ ret = rproc_handle_boot_rsc(rproc, table, tablesz); if (ret) { dev_err(dev, "Failed to process resources: %d\n", ret); goto clean_up; } /* load the ELF segments to memory */ ret = rproc_load_segments(rproc, fw->data, fw->size); if (ret) { dev_err(dev, "Failed to load program segments: %d\n", ret); goto clean_up; } /* power up the remote processor */ ret = rproc->ops->start(rproc); if (ret) { dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret); goto clean_up; } rproc->state = RPROC_RUNNING; dev_info(dev, "remote processor %s is now up\n", rproc->name); return 0; clean_up: rproc_resource_cleanup(rproc); rproc_disable_iommu(rproc); return ret; } /* * take a firmware and look for virtio devices to register. * * Note: this function is called asynchronously upon registration of the * remote processor (so we must wait until it completes before we try * to unregister the device. one other option is just to use kref here, * that might be cleaner). */ static void rproc_fw_config_virtio(const struct firmware *fw, void *context) { struct rproc *rproc = context; struct resource_table *table; int ret, tablesz; if (rproc_fw_sanity_check(rproc, fw) < 0) goto out; /* look for the resource table */ table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); if (!table) goto out; /* look for virtio devices and register them */ ret = rproc_handle_virtio_rsc(rproc, table, tablesz); if (ret) goto out; out: if (fw) release_firmware(fw); /* allow rproc_unregister() contexts, if any, to proceed */ complete_all(&rproc->firmware_loading_complete); } /** * rproc_boot() - boot a remote processor * @rproc: handle of a remote processor * * Boot a remote processor (i.e. load its firmware, power it on, ...). 
* * If the remote processor is already powered on, this function immediately * returns (successfully). * * Returns 0 on success, and an appropriate error value otherwise. */ int rproc_boot(struct rproc *rproc) { const struct firmware *firmware_p; struct device *dev; int ret; if (!rproc) { pr_err("invalid rproc handle\n"); return -EINVAL; } dev = rproc->dev; ret = mutex_lock_interruptible(&rproc->lock); if (ret) { dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); return ret; } /* loading a firmware is required */ if (!rproc->firmware) { dev_err(dev, "%s: no firmware to load\n", __func__); ret = -EINVAL; goto unlock_mutex; } /* prevent underlying implementation from being removed */ if (!try_module_get(dev->driver->owner)) { dev_err(dev, "%s: can't get owner\n", __func__); ret = -EINVAL; goto unlock_mutex; } /* skip the boot process if rproc is already powered up */ if (atomic_inc_return(&rproc->power) > 1) { ret = 0; goto unlock_mutex; } dev_info(dev, "powering up %s\n", rproc->name); /* load firmware */ ret = request_firmware(&firmware_p, rproc->firmware, dev); if (ret < 0) { dev_err(dev, "request_firmware failed: %d\n", ret); goto downref_rproc; } ret = rproc_fw_boot(rproc, firmware_p); release_firmware(firmware_p); downref_rproc: if (ret) { module_put(dev->driver->owner); atomic_dec(&rproc->power); } unlock_mutex: mutex_unlock(&rproc->lock); return ret; } EXPORT_SYMBOL(rproc_boot); /** * rproc_shutdown() - power off the remote processor * @rproc: the remote processor * * Power off a remote processor (previously booted with rproc_boot()). * * In case @rproc is still being used by an additional user(s), then * this function will just decrement the power refcount and exit, * without really powering off the device. * * Every call to rproc_boot() must (eventually) be accompanied by a call * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. * * Notes: * - we're not decrementing the rproc's refcount, only the power refcount. 
* which means that the @rproc handle stays valid even after rproc_shutdown() * returns, and users can still use it with a subsequent rproc_boot(), if * needed. * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly * because rproc_shutdown() _does not_ decrement the refcount of @rproc. * To decrement the refcount of @rproc, use rproc_put() (but _only_ if * you acquired @rproc using rproc_get_by_name()). */ void rproc_shutdown(struct rproc *rproc) { struct device *dev = rproc->dev; int ret; ret = mutex_lock_interruptible(&rproc->lock); if (ret) { dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); return; } /* if the remote proc is still needed, bail out */ if (!atomic_dec_and_test(&rproc->power)) goto out; /* power off the remote processor */ ret = rproc->ops->stop(rproc); if (ret) { atomic_inc(&rproc->power); dev_err(dev, "can't stop rproc: %d\n", ret); goto out; } /* clean up all acquired resources */ rproc_resource_cleanup(rproc); rproc_disable_iommu(rproc); rproc->state = RPROC_OFFLINE; dev_info(dev, "stopped remote processor %s\n", rproc->name); out: mutex_unlock(&rproc->lock); if (!ret) module_put(dev->driver->owner); } EXPORT_SYMBOL(rproc_shutdown); /** * rproc_release() - completely deletes the existence of a remote processor * @kref: the rproc's kref * * This function should _never_ be called directly. * * The only reasonable location to use it is as an argument when kref_put'ing * @rproc's refcount. * * This way it will be called when no one holds a valid pointer to this @rproc * anymore (and obviously after it is removed from the rprocs klist). * * Note: this function is not static because rproc_vdev_release() needs it when * it decrements @rproc's refcount. 
 */
void rproc_release(struct kref *kref)
{
	struct rproc *rproc = container_of(kref, struct rproc, refcount);
	struct rproc_vdev *rvdev, *rvtmp;

	dev_info(rproc->dev, "removing %s\n", rproc->name);

	rproc_delete_debug_dir(rproc);

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
		__rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS);
		list_del(&rvdev->node);
	}

	/*
	 * At this point no one holds a reference to rproc anymore,
	 * so we can directly unroll rproc_alloc()
	 */
	rproc_free(rproc);
}

/* will be called when an rproc is added to the rprocs klist */
static void klist_rproc_get(struct klist_node *n)
{
	struct rproc *rproc = container_of(n, struct rproc, node);

	kref_get(&rproc->refcount);
}

/* will be called when an rproc is removed from the rprocs klist */
static void klist_rproc_put(struct klist_node *n)
{
	struct rproc *rproc = container_of(n, struct rproc, node);

	kref_put(&rproc->refcount, rproc_release);
}

/* advance a klist iteration: return the next rproc, or NULL at the end */
static struct rproc *next_rproc(struct klist_iter *i)
{
	struct klist_node *n;

	n = klist_next(i);
	if (!n)
		return NULL;

	return container_of(n, struct rproc, node);
}

/**
 * rproc_get_by_name() - find a remote processor by name and boot it
 * @name: name of the remote processor
 *
 * Finds an rproc handle using the remote processor's name, and then
 * boot it. If it's already powered on, then just immediately return
 * (successfully).
 *
 * Returns the rproc handle on success, and NULL on failure.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Note: currently this function (and its counterpart rproc_put()) are not
 * being used. We need to scrutinize the use cases
 * that still need them, and see if we can migrate them to use the non
 * name-based boot/shutdown interface.
 */
struct rproc *rproc_get_by_name(const char *name)
{
	struct rproc *rproc;
	struct klist_iter i;
	int ret;

	/* find the remote processor, and upref its refcount */
	klist_iter_init(&rprocs, &i);
	while ((rproc = next_rproc(&i)) != NULL)
		if (!strcmp(rproc->name, name)) {
			/* take our own reference before leaving the klist */
			kref_get(&rproc->refcount);
			break;
		}
	klist_iter_exit(&i);

	/* can't find this rproc ? */
	if (!rproc) {
		pr_err("can't find remote processor %s\n", name);
		return NULL;
	}

	ret = rproc_boot(rproc);
	if (ret < 0) {
		/* boot failed: drop the reference we just took */
		kref_put(&rproc->refcount, rproc_release);
		return NULL;
	}

	return rproc;
}
EXPORT_SYMBOL(rproc_get_by_name);

/**
 * rproc_put() - decrement the refcount of a remote processor, and shut it down
 * @rproc: the remote processor
 *
 * This function tries to shutdown @rproc, and it then decrements its
 * refcount.
 *
 * After this function returns, @rproc may _not_ be used anymore, and its
 * handle should be considered invalid.
 *
 * This function should be called _iff_ the @rproc handle was grabbed by
 * calling rproc_get_by_name().
 */
void rproc_put(struct rproc *rproc)
{
	/* try to power off the remote processor */
	rproc_shutdown(rproc);

	/* downref rproc's refcount */
	kref_put(&rproc->refcount, rproc_release);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_register() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers might be
 * probed.
 */
int rproc_register(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret = 0;

	/* expose to rproc_get_by_name users */
	klist_add_tail(&rproc->node, &rprocs);

	dev_info(rproc->dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);

	/* rproc_unregister() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio device,
	 * what virtio features does it support, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					rproc->firmware, dev, GFP_KERNEL,
					rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
		/*
		 * unroll: no async loader is in flight, so unblock any
		 * rproc_unregister() waiter and drop the klist exposure
		 */
		complete_all(&rproc->firmware_loading_complete);
		klist_remove(&rproc->node);
	}

	return ret;
}
EXPORT_SYMBOL(rproc_register);

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_register() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
				const struct rproc_ops *ops,
				const char *firmware, int len)
{
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return NULL;

	/* one allocation holds both the rproc and @len bytes of priv data */
	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc) {
		dev_err(dev, "%s: kzalloc failed\n", __func__);
		return NULL;
	}

	rproc->dev = dev;
	rproc->name = name;
	rproc->ops = ops;
	rproc->firmware = firmware;
	/* private data lives immediately after the rproc struct itself */
	rproc->priv = &rproc[1];

	atomic_set(&rproc->power, 0);

	kref_init(&rproc->refcount);

	mutex_init(&rproc->lock);

	idr_init(&rproc->notifyids);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);

	rproc->state = RPROC_OFFLINE;

	return rproc;
}
EXPORT_SYMBOL(rproc_alloc);

/**
 * rproc_free() - free an rproc handle that was allocated by rproc_alloc
 * @rproc: the remote processor handle
 *
 * This function should _only_ be used if @rproc was only allocated,
 * but not registered yet.
 *
 * If @rproc was already successfully registered (by calling rproc_register()),
 * then use rproc_unregister() instead.
 */
void rproc_free(struct rproc *rproc)
{
	idr_remove_all(&rproc->notifyids);
	idr_destroy(&rproc->notifyids);

	kfree(rproc);
}
EXPORT_SYMBOL(rproc_free);

/**
 * rproc_unregister() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * Unregisters a remote processor, and decrements its refcount.
 * If its refcount drops to zero, then @rproc will be freed. If not,
 * it will be freed later once the last reference is dropped.
 *
 * This function should be called when the platform specific rproc
 * implementation decides to remove the rproc device. it should
 * _only_ be called if a previous invocation of rproc_register()
 * has completed successfully.
 *
 * After rproc_unregister() returns, @rproc is _not_ valid anymore and
 * it shouldn't be used. More specifically, don't call rproc_free()
 * or try to directly free @rproc after rproc_unregister() returns;
 * none of these are needed, and calling them is a bug.
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_unregister(struct rproc *rproc)
{
	struct rproc_vdev *rvdev;

	if (!rproc)
		return -EINVAL;

	/* if rproc is just being registered, wait */
	wait_for_completion(&rproc->firmware_loading_complete);

	/* clean up remote vdev entries */
	list_for_each_entry(rvdev, &rproc->rvdevs, node)
		rproc_remove_virtio_dev(rvdev);

	/* the rproc is downref'ed as soon as it's removed from the klist */
	klist_del(&rproc->node);

	/* the rproc will only be released after its refcount drops to zero */
	kref_put(&rproc->refcount, rproc_release);

	return 0;
}
EXPORT_SYMBOL(rproc_unregister);

static int __init remoteproc_init(void)
{
	rproc_init_debugfs();
	return 0;
}
module_init(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");
gpl-2.0
AudioGod/DTS-Sound-Integration_CAF-Android-kernel
drivers/rtc/rtc-max6900.c
2086
6811
/*
 * rtc class driver for the Maxim MAX6900 chip
 *
 * Author: Dale Farnsworth <dale@farnsworth.org>
 *
 * based on previously existing rtc class drivers
 *
 * 2007 (c) MontaVista, Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>

#define DRV_VERSION "0.2"

/*
 * register indices
 */
#define MAX6900_REG_SC		0	/* seconds 00-59 */
#define MAX6900_REG_MN		1	/* minutes 00-59 */
#define MAX6900_REG_HR		2	/* hours 00-23 */
#define MAX6900_REG_DT		3	/* day of month 00-31 */
#define MAX6900_REG_MO		4	/* month 01-12 */
#define MAX6900_REG_DW		5	/* day of week 1-7 */
#define MAX6900_REG_YR		6	/* year 00-99 */
#define MAX6900_REG_CT		7	/* control */
					/* register 8 is undocumented */
#define MAX6900_REG_CENTURY	9	/* century */
#define MAX6900_REG_LEN		10

#define MAX6900_BURST_LEN	8	/* can burst r/w first 8 regs */

#define MAX6900_REG_CT_WP	(1 << 7)	/* Write Protect */

/*
 * register read/write commands
 */
#define MAX6900_REG_CONTROL_WRITE	0x8e
#define MAX6900_REG_CENTURY_WRITE	0x92
#define MAX6900_REG_CENTURY_READ	0x93
#define MAX6900_REG_RESERVED_READ	0x96
#define MAX6900_REG_BURST_WRITE		0xbe
#define MAX6900_REG_BURST_READ		0xbf

#define MAX6900_IDLE_TIME_AFTER_WRITE	3	/* specification says 2.5 mS */

static struct i2c_driver max6900_driver;

/*
 * Read all time/date registers into @buf: one burst read covering the
 * first 8 registers, followed by a separate read of the century register
 * (which lives outside the burst window).
 */
static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
{
	u8 reg_burst_read[1] = { MAX6900_REG_BURST_READ };
	u8 reg_century_read[1] = { MAX6900_REG_CENTURY_READ };
	struct i2c_msg msgs[4] = {
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(reg_burst_read),
		 .buf = reg_burst_read}
		,
		{
		 .addr = client->addr,
		 .flags = I2C_M_RD,
		 .len = MAX6900_BURST_LEN,
		 .buf = buf}
		,
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(reg_century_read),
		 .buf = reg_century_read}
		,
		{
		 .addr = client->addr,
		 .flags = I2C_M_RD,
		 .len = sizeof(buf[MAX6900_REG_CENTURY]),
		 .buf = &buf[MAX6900_REG_CENTURY]
		 }
	};
	int rc;

	rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (rc != ARRAY_SIZE(msgs)) {
		dev_err(&client->dev, "%s: register read failed\n", __func__);
		return -EIO;
	}
	return 0;
}

/*
 * Write the century register and then burst-write the first 8 registers
 * from @buf. Two separate transfers are required -- see the comment below.
 */
static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
{
	u8 i2c_century_buf[1 + 1] = { MAX6900_REG_CENTURY_WRITE };
	struct i2c_msg century_msgs[1] = {
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(i2c_century_buf),
		 .buf = i2c_century_buf}
	};
	u8 i2c_burst_buf[MAX6900_BURST_LEN + 1] = { MAX6900_REG_BURST_WRITE };
	struct i2c_msg burst_msgs[1] = {
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(i2c_burst_buf),
		 .buf = i2c_burst_buf}
	};
	int rc;

	/*
	 * We have to make separate calls to i2c_transfer because of
	 * the need to delay after each write to the chip. Also,
	 * we write the century byte first, since we set the write-protect
	 * bit as part of the burst write.
	 */
	i2c_century_buf[1] = buf[MAX6900_REG_CENTURY];

	rc = i2c_transfer(client->adapter, century_msgs,
			  ARRAY_SIZE(century_msgs));
	if (rc != ARRAY_SIZE(century_msgs))
		goto write_failed;

	msleep(MAX6900_IDLE_TIME_AFTER_WRITE);

	memcpy(&i2c_burst_buf[1], buf, MAX6900_BURST_LEN);

	rc = i2c_transfer(client->adapter, burst_msgs, ARRAY_SIZE(burst_msgs));
	if (rc != ARRAY_SIZE(burst_msgs))
		goto write_failed;
	msleep(MAX6900_IDLE_TIME_AFTER_WRITE);

	return 0;

write_failed:
	dev_err(&client->dev, "%s: register write failed\n", __func__);
	return -EIO;
}

/* Read the chip registers and convert BCD fields into a struct rtc_time. */
static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
{
	int rc;
	u8 regs[MAX6900_REG_LEN];

	rc = max6900_i2c_read_regs(client, regs);
	if (rc < 0)
		return rc;

	tm->tm_sec = bcd2bin(regs[MAX6900_REG_SC]);
	tm->tm_min = bcd2bin(regs[MAX6900_REG_MN]);
	tm->tm_hour = bcd2bin(regs[MAX6900_REG_HR] & 0x3f);
	tm->tm_mday = bcd2bin(regs[MAX6900_REG_DT]);
	/* chip months are 1-12; struct rtc_time months are 0-11 */
	tm->tm_mon = bcd2bin(regs[MAX6900_REG_MO]) - 1;
	/* combine the year and century registers; tm_year is years since 1900 */
	tm->tm_year = bcd2bin(regs[MAX6900_REG_YR]) +
		      bcd2bin(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
	tm->tm_wday = bcd2bin(regs[MAX6900_REG_DW]);

	return rtc_valid_tm(tm);
}

/* Clear the write-protect bit so the time/date registers can be updated. */
static int max6900_i2c_clear_write_protect(struct i2c_client *client)
{
	int rc;
	rc = i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
	if (rc < 0) {
		dev_err(&client->dev, "%s: control register write failed\n",
			__func__);
		return -EIO;
	}
	return 0;
}

/* Convert a struct rtc_time to BCD registers and write them to the chip. */
static int
max6900_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
{
	u8 regs[MAX6900_REG_LEN];
	int rc;

	rc = max6900_i2c_clear_write_protect(client);
	if (rc < 0)
		return rc;

	regs[MAX6900_REG_SC] = bin2bcd(tm->tm_sec);
	regs[MAX6900_REG_MN] = bin2bcd(tm->tm_min);
	regs[MAX6900_REG_HR] = bin2bcd(tm->tm_hour);
	regs[MAX6900_REG_DT] = bin2bcd(tm->tm_mday);
	regs[MAX6900_REG_MO] = bin2bcd(tm->tm_mon + 1);
	regs[MAX6900_REG_DW] = bin2bcd(tm->tm_wday);
	regs[MAX6900_REG_YR] = bin2bcd(tm->tm_year % 100);
	regs[MAX6900_REG_CENTURY] = bin2bcd((tm->tm_year + 1900) / 100);
	/* set write protect */
	regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP;

	rc = max6900_i2c_write_regs(client, regs);
	if (rc < 0)
		return rc;

	return 0;
}

static int max6900_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return max6900_i2c_read_time(to_i2c_client(dev), tm);
}

static int max6900_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return max6900_i2c_set_time(to_i2c_client(dev), tm);
}

/* nothing to tear down: the rtc device is devm-managed */
static int max6900_remove(struct i2c_client *client)
{
	return 0;
}

static const struct rtc_class_ops max6900_rtc_ops = {
	.read_time = max6900_rtc_read_time,
	.set_time = max6900_rtc_set_time,
};

static int
max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct rtc_device *rtc;

	/* the driver needs raw i2c_transfer, not just SMBus */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = devm_rtc_device_register(&client->dev, max6900_driver.driver.name,
					&max6900_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	return 0;
}

static struct i2c_device_id max6900_id[] = {
	{ "max6900", 0 },
	{ }
};

static struct i2c_driver max6900_driver = {
	.driver = {
		   .name = "rtc-max6900",
		   },
	.probe = max6900_probe,
	.remove = max6900_remove,
	.id_table = max6900_id,
};

module_i2c_driver(max6900_driver);

MODULE_DESCRIPTION("Maxim MAX6900 RTC driver");
MODULE_AUTHOR("Dale Farnsworth <dale@farnsworth.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
gerard87/kernel_angler_n_mr1
drivers/net/ethernet/ibm/emac/mal.c
2086
19983
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Softare Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

/* number of MAL instances probed so far; used to assign mal->index */
static int mal_count;

/*
 * Attach a commac (EMAC channel user) to this MAL instance, claiming its
 * TX/RX channels. Enables NAPI when the first commac is added.
 */
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

/*
 * Detach a commac, releasing its channels. Disables NAPI when the last
 * commac is removed.
 */
void mal_unregister_commac(struct mal_instance *mal,
		struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* Program the RX channel buffer size; @size must be a multiple of 16. */
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	/* the RCBS register holds the size in 16-byte units */
	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

/* Offset of a TX channel's buffer descriptors within the shared BD area. */
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

/* Offset of an RX channel's buffer descriptors (they follow all TX BDs). */
int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	/* read-modify-write of TXCASR needs the lock */
	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	/* TXCARR is a write-to-clear register, so no locking needed here */
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some 4xx PPC's (e.g. 460EX/GT), the rx channel is a multiple
	 * of 8, but enabling in MAL_RXCASR needs the divided by 8 value
	 * for the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

/* Add a commac to the NAPI poll list; it starts in the disabled state. */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

/* System-error interrupt handler: log and clear the MAL error status. */
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore Descriptor error,
			 * TXDE or RXDE interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or
			 * incorrect physical address in BD (i.e. bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

/* Kick NAPI: disable end-of-buffer IRQs and schedule the poll routine. */
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		mal_disable_eob_irq(mal);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

/* TX end-of-buffer interrupt: acknowledge and defer work to NAPI. */
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

/* RX end-of-buffer interrupt: acknowledge and defer work to NAPI. */
static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
				(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

/* TX descriptor-error interrupt: clear and (rate-limited) report it. */
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

/*
 * RX descriptor-error interrupt: notify the affected commacs (stopping
 * their RX) and let NAPI restart things once descriptors are refilled.
 */
static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

/* Combined interrupt handler used when all MAL IRQs share one line. */
static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

/* NAPI poll callback: service TX completions and RX packets for all commacs. */
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something more smart here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
		}
	}

	/* We need to disable IRQs to protect from RXDE IRQ here */
	spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
	mal_enable_eob_irq(mal);
	spin_unlock_irqrestore(&mal->lock, flags);

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (napi_reschedule(napi))
				mal_disable_eob_irq(mal);
			else
				MAL_DBG2(mal, "already in poll list" NL);

			if (budget > 0)
				goto again;
			else
				goto more_work;
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

/* Soft-reset the MAL and busy-wait (bounded) for the reset bit to clear. */
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
	    sizeof(struct mal_regs);
}

/* Dump MAL registers into an ethtool regs buffer; returns the next slot. */
void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr =
get_mal_dcrn(mal, MAL_TXCASR); regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR); regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR); regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR); regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR); regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR); regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR); regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR); for (i = 0; i < regs->tx_count; ++i) regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i)); for (i = 0; i < regs->rx_count; ++i) { regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i)); regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i)); } return regs + 1; } static int mal_probe(struct platform_device *ofdev) { struct mal_instance *mal; int err = 0, i, bd_size; int index = mal_count++; unsigned int dcr_base; const u32 *prop; u32 cfg; unsigned long irqflags; irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde; mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL); if (!mal) return -ENOMEM; mal->index = index; mal->ofdev = ofdev; mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 
2 : 1; MAL_DBG(mal, "probe" NL); prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL); if (prop == NULL) { printk(KERN_ERR "mal%d: can't find MAL num-tx-chans property!\n", index); err = -ENODEV; goto fail; } mal->num_tx_chans = prop[0]; prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL); if (prop == NULL) { printk(KERN_ERR "mal%d: can't find MAL num-rx-chans property!\n", index); err = -ENODEV; goto fail; } mal->num_rx_chans = prop[0]; dcr_base = dcr_resource_start(ofdev->dev.of_node, 0); if (dcr_base == 0) { printk(KERN_ERR "mal%d: can't find DCR resource!\n", index); err = -ENODEV; goto fail; } mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100); if (!DCR_MAP_OK(mal->dcr_host)) { printk(KERN_ERR "mal%d: failed to map DCRs !\n", index); err = -ENODEV; goto fail; } if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) { #if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \ defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR) mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | MAL_FTR_COMMON_ERR_INT); #else printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", ofdev->dev.of_node->full_name); err = -ENODEV; goto fail; #endif } mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1); mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2); if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { mal->txde_irq = mal->rxde_irq = mal->serr_irq; } else { mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3); mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4); } if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ || mal->rxde_irq == NO_IRQ) { printk(KERN_ERR "mal%d: failed to map interrupts !\n", index); err = -ENODEV; goto fail_unmap; } INIT_LIST_HEAD(&mal->poll_list); INIT_LIST_HEAD(&mal->list); spin_lock_init(&mal->lock); init_dummy_netdev(&mal->dummy_dev); netif_napi_add(&mal->dummy_dev, &mal->napi, 
mal_poll, CONFIG_IBM_EMAC_POLL_WEIGHT); /* Load power-on reset defaults */ mal_reset(mal); /* Set the MAL configuration register */ cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT; cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA; /* Current Axon is not happy with priority being non-0, it can * deadlock, fix it up here */ if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon")) cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); /* Apply configuration */ set_mal_dcrn(mal, MAL_CFG, cfg); /* Allocate space for BD rings */ BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32); BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32); bd_size = sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + NUM_RX_BUFF * mal->num_rx_chans); mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL | __GFP_ZERO); if (mal->bd_virt == NULL) { err = -ENOMEM; goto fail_unmap; } for (i = 0; i < mal->num_tx_chans; ++i) set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + sizeof(struct mal_descriptor) * mal_tx_bd_offset(mal, i)); for (i = 0; i < mal->num_rx_chans; ++i) set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma + sizeof(struct mal_descriptor) * mal_rx_bd_offset(mal, i)); if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) { irqflags = IRQF_SHARED; hdlr_serr = hdlr_txde = hdlr_rxde = mal_int; } else { irqflags = 0; hdlr_serr = mal_serr; hdlr_txde = mal_txde; hdlr_rxde = mal_rxde; } err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal); if (err) goto fail2; err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal); if (err) goto fail3; err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); if (err) goto fail4; err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal); if (err) goto fail5; err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); if (err) goto fail6; /* Enable all MAL SERR interrupt sources */ if (mal->version == 2) set_mal_dcrn(mal, MAL_IER, 
MAL2_IER_EVENTS); else set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS); /* Enable EOB interrupt */ mal_enable_eob_irq(mal); printk(KERN_INFO "MAL v%d %s, %d TX channels, %d RX channels\n", mal->version, ofdev->dev.of_node->full_name, mal->num_tx_chans, mal->num_rx_chans); /* Advertise this instance to the rest of the world */ wmb(); dev_set_drvdata(&ofdev->dev, mal); mal_dbg_register(mal); return 0; fail6: free_irq(mal->rxde_irq, mal); fail5: free_irq(mal->txeob_irq, mal); fail4: free_irq(mal->txde_irq, mal); fail3: free_irq(mal->serr_irq, mal); fail2: dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); fail_unmap: dcr_unmap(mal->dcr_host, 0x100); fail: kfree(mal); return err; } static int mal_remove(struct platform_device *ofdev) { struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); MAL_DBG(mal, "remove" NL); /* Synchronize with scheduled polling */ napi_disable(&mal->napi); if (!list_empty(&mal->list)) /* This is *very* bad */ WARN(1, KERN_EMERG "mal%d: commac list is not empty on remove!\n", mal->index); dev_set_drvdata(&ofdev->dev, NULL); free_irq(mal->serr_irq, mal); free_irq(mal->txde_irq, mal); free_irq(mal->txeob_irq, mal); free_irq(mal->rxde_irq, mal); free_irq(mal->rxeob_irq, mal); mal_reset(mal); mal_dbg_unregister(mal); dma_free_coherent(&ofdev->dev, sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, mal->bd_dma); kfree(mal); return 0; } static struct of_device_id mal_platform_match[] = { { .compatible = "ibm,mcmal", }, { .compatible = "ibm,mcmal2", }, /* Backward compat */ { .type = "mcmal-dma", .compatible = "ibm,mcmal", }, { .type = "mcmal-dma", .compatible = "ibm,mcmal2", }, {}, }; static struct platform_driver mal_of_driver = { .driver = { .name = "mcmal", .owner = THIS_MODULE, .of_match_table = mal_platform_match, }, .probe = mal_probe, .remove = mal_remove, }; int __init mal_init(void) { return platform_driver_register(&mal_of_driver); } void mal_exit(void) { 
platform_driver_unregister(&mal_of_driver); }
gpl-2.0
NamelessRom/android_kernel_nvidia_shieldtablet
drivers/mmc/host/mxcmmc.c
2086
31506
/* * linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver * * This is a driver for the SDHC controller found in Freescale MX2/MX3 * SoCs. It is basically the same hardware as found on MX1 (imxmmc.c). * Unlike the hardware found on MX1, this hardware just works and does * not need all the quirks found in imxmmc.c, hence the separate driver. * * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> * * derived from pxamci.c by Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/types.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_dma.h> #include <linux/of_gpio.h> #include <linux/mmc/slot-gpio.h> #include <asm/dma.h> #include <asm/irq.h> #include <linux/platform_data/mmc-mxcmmc.h> #include <linux/platform_data/dma-imx.h> #define DRIVER_NAME "mxc-mmc" #define MXCMCI_TIMEOUT_MS 10000 #define MMC_REG_STR_STP_CLK 0x00 #define MMC_REG_STATUS 0x04 #define MMC_REG_CLK_RATE 0x08 #define MMC_REG_CMD_DAT_CONT 0x0C #define MMC_REG_RES_TO 0x10 #define MMC_REG_READ_TO 0x14 #define MMC_REG_BLK_LEN 0x18 #define MMC_REG_NOB 0x1C #define MMC_REG_REV_NO 0x20 #define MMC_REG_INT_CNTR 0x24 #define MMC_REG_CMD 0x28 #define MMC_REG_ARG 0x2C #define MMC_REG_RES_FIFO 0x34 #define MMC_REG_BUFFER_ACCESS 0x38 #define STR_STP_CLK_RESET (1 << 3) #define STR_STP_CLK_START_CLK (1 << 1) #define 
STR_STP_CLK_STOP_CLK (1 << 0) #define STATUS_CARD_INSERTION (1 << 31) #define STATUS_CARD_REMOVAL (1 << 30) #define STATUS_YBUF_EMPTY (1 << 29) #define STATUS_XBUF_EMPTY (1 << 28) #define STATUS_YBUF_FULL (1 << 27) #define STATUS_XBUF_FULL (1 << 26) #define STATUS_BUF_UND_RUN (1 << 25) #define STATUS_BUF_OVFL (1 << 24) #define STATUS_SDIO_INT_ACTIVE (1 << 14) #define STATUS_END_CMD_RESP (1 << 13) #define STATUS_WRITE_OP_DONE (1 << 12) #define STATUS_DATA_TRANS_DONE (1 << 11) #define STATUS_READ_OP_DONE (1 << 11) #define STATUS_WR_CRC_ERROR_CODE_MASK (3 << 10) #define STATUS_CARD_BUS_CLK_RUN (1 << 8) #define STATUS_BUF_READ_RDY (1 << 7) #define STATUS_BUF_WRITE_RDY (1 << 6) #define STATUS_RESP_CRC_ERR (1 << 5) #define STATUS_CRC_READ_ERR (1 << 3) #define STATUS_CRC_WRITE_ERR (1 << 2) #define STATUS_TIME_OUT_RESP (1 << 1) #define STATUS_TIME_OUT_READ (1 << 0) #define STATUS_ERR_MASK 0x2f #define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1 << 12) #define CMD_DAT_CONT_STOP_READWAIT (1 << 11) #define CMD_DAT_CONT_START_READWAIT (1 << 10) #define CMD_DAT_CONT_BUS_WIDTH_4 (2 << 8) #define CMD_DAT_CONT_INIT (1 << 7) #define CMD_DAT_CONT_WRITE (1 << 4) #define CMD_DAT_CONT_DATA_ENABLE (1 << 3) #define CMD_DAT_CONT_RESPONSE_48BIT_CRC (1 << 0) #define CMD_DAT_CONT_RESPONSE_136BIT (2 << 0) #define CMD_DAT_CONT_RESPONSE_48BIT (3 << 0) #define INT_SDIO_INT_WKP_EN (1 << 18) #define INT_CARD_INSERTION_WKP_EN (1 << 17) #define INT_CARD_REMOVAL_WKP_EN (1 << 16) #define INT_CARD_INSERTION_EN (1 << 15) #define INT_CARD_REMOVAL_EN (1 << 14) #define INT_SDIO_IRQ_EN (1 << 13) #define INT_DAT0_EN (1 << 12) #define INT_BUF_READ_EN (1 << 4) #define INT_BUF_WRITE_EN (1 << 3) #define INT_END_CMD_RES_EN (1 << 2) #define INT_WRITE_OP_DONE_EN (1 << 1) #define INT_READ_OP_EN (1 << 0) enum mxcmci_type { IMX21_MMC, IMX31_MMC, MPC512X_MMC, }; struct mxcmci_host { struct mmc_host *mmc; struct resource *res; void __iomem *base; int irq; int detect_irq; struct dma_chan *dma; struct dma_async_tx_descriptor *desc; 
int do_dma; int default_irq_mask; int use_sdio; unsigned int power_mode; struct imxmmc_platform_data *pdata; struct mmc_request *req; struct mmc_command *cmd; struct mmc_data *data; unsigned int datasize; unsigned int dma_dir; u16 rev_no; unsigned int cmdat; struct clk *clk_ipg; struct clk *clk_per; int clock; struct work_struct datawork; spinlock_t lock; struct regulator *vcc; int burstlen; int dmareq; struct dma_slave_config dma_slave_config; struct imx_dma_data dma_data; struct timer_list watchdog; enum mxcmci_type devtype; }; static const struct platform_device_id mxcmci_devtype[] = { { .name = "imx21-mmc", .driver_data = IMX21_MMC, }, { .name = "imx31-mmc", .driver_data = IMX31_MMC, }, { .name = "mpc512x-sdhc", .driver_data = MPC512X_MMC, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, mxcmci_devtype); static const struct of_device_id mxcmci_of_match[] = { { .compatible = "fsl,imx21-mmc", .data = &mxcmci_devtype[IMX21_MMC], }, { .compatible = "fsl,imx31-mmc", .data = &mxcmci_devtype[IMX31_MMC], }, { .compatible = "fsl,mpc5121-sdhc", .data = &mxcmci_devtype[MPC512X_MMC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxcmci_of_match); static inline int is_imx31_mmc(struct mxcmci_host *host) { return host->devtype == IMX31_MMC; } static inline int is_mpc512x_mmc(struct mxcmci_host *host) { return host->devtype == MPC512X_MMC; } static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg) { if (IS_ENABLED(CONFIG_PPC_MPC512x)) return ioread32be(host->base + reg); else return readl(host->base + reg); } static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg) { if (IS_ENABLED(CONFIG_PPC_MPC512x)) iowrite32be(val, host->base + reg); else writel(val, host->base + reg); } static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg) { if (IS_ENABLED(CONFIG_PPC_MPC512x)) return ioread32be(host->base + reg); else return readw(host->base + reg); } static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg) { if 
(IS_ENABLED(CONFIG_PPC_MPC512x)) iowrite32be(val, host->base + reg); else writew(val, host->base + reg); } static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); static inline void mxcmci_init_ocr(struct mxcmci_host *host) { host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); if (IS_ERR(host->vcc)) { host->vcc = NULL; } else { host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc); if (host->pdata && host->pdata->ocr_avail) dev_warn(mmc_dev(host->mmc), "pdata->ocr_avail will not be used\n"); } if (host->vcc == NULL) { /* fall-back to platform data */ if (host->pdata && host->pdata->ocr_avail) host->mmc->ocr_avail = host->pdata->ocr_avail; else host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; } } static inline void mxcmci_set_power(struct mxcmci_host *host, unsigned char power_mode, unsigned int vdd) { if (host->vcc) { if (power_mode == MMC_POWER_UP) mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); else if (power_mode == MMC_POWER_OFF) mmc_regulator_set_ocr(host->mmc, host->vcc, 0); } if (host->pdata && host->pdata->setpower) host->pdata->setpower(mmc_dev(host->mmc), vdd); } static inline int mxcmci_use_dma(struct mxcmci_host *host) { return host->do_dma; } static void mxcmci_softreset(struct mxcmci_host *host) { int i; dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n"); /* reset sequence */ mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK); mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); for (i = 0; i < 8; i++) mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); mxcmci_writew(host, 0xff, MMC_REG_RES_TO); } static int mxcmci_setup_dma(struct mmc_host *mmc); #if IS_ENABLED(CONFIG_PPC_MPC512x) static inline void buffer_swap32(u32 *buf, int len) { int i; for (i = 0; i < ((len + 3) / 4); i++) { st_le32(buf, *buf); buf++; } } static void mxcmci_swap_buffers(struct mmc_data *data) { struct scatterlist *sg; int i; for_each_sg(data->sg, sg, data->sg_len, i) 
buffer_swap32(sg_virt(sg), sg->length); } #else static inline void mxcmci_swap_buffers(struct mmc_data *data) {} #endif static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) { unsigned int nob = data->blocks; unsigned int blksz = data->blksz; unsigned int datasize = nob * blksz; struct scatterlist *sg; enum dma_transfer_direction slave_dirn; int i, nents; if (data->flags & MMC_DATA_STREAM) nob = 0xffff; host->data = data; data->bytes_xfered = 0; mxcmci_writew(host, nob, MMC_REG_NOB); mxcmci_writew(host, blksz, MMC_REG_BLK_LEN); host->datasize = datasize; if (!mxcmci_use_dma(host)) return 0; for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->offset & 3 || sg->length & 3 || sg->length < 512) { host->do_dma = 0; return 0; } } if (data->flags & MMC_DATA_READ) { host->dma_dir = DMA_FROM_DEVICE; slave_dirn = DMA_DEV_TO_MEM; } else { host->dma_dir = DMA_TO_DEVICE; slave_dirn = DMA_MEM_TO_DEV; mxcmci_swap_buffers(data); } nents = dma_map_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); if (nents != data->sg_len) return -EINVAL; host->desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len, slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!host->desc) { dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); host->do_dma = 0; return 0; /* Fall back to PIO */ } wmb(); dmaengine_submit(host->desc); dma_async_issue_pending(host->dma); mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); return 0; } static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); static void mxcmci_dma_callback(void *data) { struct mxcmci_host *host = data; u32 stat; del_timer(&host->watchdog); stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS); dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); if (stat & STATUS_READ_OP_DONE) mxcmci_writel(host, 
STATUS_READ_OP_DONE, MMC_REG_STATUS); mxcmci_data_done(host, stat); } static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, unsigned int cmdat) { u32 int_cntr = host->default_irq_mask; unsigned long flags; WARN_ON(host->cmd != NULL); host->cmd = cmd; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1: /* short CRC, OPCODE */ case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC; break; case MMC_RSP_R2: /* long 136 bit + CRC */ cmdat |= CMD_DAT_CONT_RESPONSE_136BIT; break; case MMC_RSP_R3: /* short */ cmdat |= CMD_DAT_CONT_RESPONSE_48BIT; break; case MMC_RSP_NONE: break; default: dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n", mmc_resp_type(cmd)); cmd->error = -EINVAL; return -EINVAL; } int_cntr = INT_END_CMD_RES_EN; if (mxcmci_use_dma(host)) { if (host->dma_dir == DMA_FROM_DEVICE) { host->desc->callback = mxcmci_dma_callback; host->desc->callback_param = host; } else { int_cntr |= INT_WRITE_OP_DONE_EN; } } spin_lock_irqsave(&host->lock, flags); if (host->use_sdio) int_cntr |= INT_SDIO_IRQ_EN; mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); mxcmci_writew(host, cmd->opcode, MMC_REG_CMD); mxcmci_writel(host, cmd->arg, MMC_REG_ARG); mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT); return 0; } static void mxcmci_finish_request(struct mxcmci_host *host, struct mmc_request *req) { u32 int_cntr = host->default_irq_mask; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (host->use_sdio) int_cntr |= INT_SDIO_IRQ_EN; mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); host->req = NULL; host->cmd = NULL; host->data = NULL; mmc_request_done(host->mmc, req); } static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) { struct mmc_data *data = host->data; int data_error; if (mxcmci_use_dma(host)) { dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); 
mxcmci_swap_buffers(data); } if (stat & STATUS_ERR_MASK) { dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat); if (stat & STATUS_CRC_READ_ERR) { dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__); data->error = -EILSEQ; } else if (stat & STATUS_CRC_WRITE_ERR) { u32 err_code = (stat >> 9) & 0x3; if (err_code == 2) { /* No CRC response */ dev_err(mmc_dev(host->mmc), "%s: No CRC -ETIMEDOUT\n", __func__); data->error = -ETIMEDOUT; } else { dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__); data->error = -EILSEQ; } } else if (stat & STATUS_TIME_OUT_READ) { dev_err(mmc_dev(host->mmc), "%s: read -ETIMEDOUT\n", __func__); data->error = -ETIMEDOUT; } else { dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__); data->error = -EIO; } } else { data->bytes_xfered = host->datasize; } data_error = data->error; host->data = NULL; return data_error; } static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat) { struct mmc_command *cmd = host->cmd; int i; u32 a, b, c; if (!cmd) return; if (stat & STATUS_TIME_OUT_RESP) { dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); cmd->error = -ETIMEDOUT; } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); cmd->error = -EILSEQ; } if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { for (i = 0; i < 4; i++) { a = mxcmci_readw(host, MMC_REG_RES_FIFO); b = mxcmci_readw(host, MMC_REG_RES_FIFO); cmd->resp[i] = a << 16 | b; } } else { a = mxcmci_readw(host, MMC_REG_RES_FIFO); b = mxcmci_readw(host, MMC_REG_RES_FIFO); c = mxcmci_readw(host, MMC_REG_RES_FIFO); cmd->resp[0] = a << 24 | b << 8 | c >> 8; } } } static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) { u32 stat; unsigned long timeout = jiffies + HZ; do { stat = mxcmci_readl(host, MMC_REG_STATUS); if (stat & STATUS_ERR_MASK) return stat; if (time_after(jiffies, timeout)) { mxcmci_softreset(host); mxcmci_set_clk_rate(host, host->clock); return 
STATUS_TIME_OUT_READ; } if (stat & mask) return 0; cpu_relax(); } while (1); } static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) { unsigned int stat; u32 *buf = _buf; while (bytes > 3) { stat = mxcmci_poll_status(host, STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); if (stat) return stat; *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); bytes -= 4; } if (bytes) { u8 *b = (u8 *)buf; u32 tmp; stat = mxcmci_poll_status(host, STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); if (stat) return stat; tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); memcpy(b, &tmp, bytes); } return 0; } static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) { unsigned int stat; u32 *buf = _buf; while (bytes > 3) { stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); if (stat) return stat; mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS); bytes -= 4; } if (bytes) { u8 *b = (u8 *)buf; u32 tmp; stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); if (stat) return stat; memcpy(&tmp, b, bytes); mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS); } stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); if (stat) return stat; return 0; } static int mxcmci_transfer_data(struct mxcmci_host *host) { struct mmc_data *data = host->req->data; struct scatterlist *sg; int stat, i; host->data = data; host->datasize = 0; if (data->flags & MMC_DATA_READ) { for_each_sg(data->sg, sg, data->sg_len, i) { stat = mxcmci_pull(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; } } else { for_each_sg(data->sg, sg, data->sg_len, i) { stat = mxcmci_push(host, sg_virt(sg), sg->length); if (stat) return stat; host->datasize += sg->length; } stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE); if (stat) return stat; } return 0; } static void mxcmci_datawork(struct work_struct *work) { struct mxcmci_host *host = container_of(work, struct mxcmci_host, datawork); int datastat = 
mxcmci_transfer_data(host); mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, MMC_REG_STATUS); mxcmci_finish_data(host, datastat); if (host->req->stop) { if (mxcmci_start_cmd(host, host->req->stop, 0)) { mxcmci_finish_request(host, host->req); return; } } else { mxcmci_finish_request(host, host->req); } } static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) { struct mmc_request *req; int data_error; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (!host->data) { spin_unlock_irqrestore(&host->lock, flags); return; } if (!host->req) { spin_unlock_irqrestore(&host->lock, flags); return; } req = host->req; if (!req->stop) host->req = NULL; /* we will handle finish req below */ data_error = mxcmci_finish_data(host, stat); spin_unlock_irqrestore(&host->lock, flags); mxcmci_read_response(host, stat); host->cmd = NULL; if (req->stop) { if (mxcmci_start_cmd(host, req->stop, 0)) { mxcmci_finish_request(host, req); return; } } else { mxcmci_finish_request(host, req); } } static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat) { mxcmci_read_response(host, stat); host->cmd = NULL; if (!host->data && host->req) { mxcmci_finish_request(host, host->req); return; } /* For the DMA case the DMA engine handles the data transfer * automatically. For non DMA we have to do it ourselves. * Don't do it in interrupt context though. 
*/ if (!mxcmci_use_dma(host) && host->data) schedule_work(&host->datawork); } static irqreturn_t mxcmci_irq(int irq, void *devid) { struct mxcmci_host *host = devid; unsigned long flags; bool sdio_irq; u32 stat; stat = mxcmci_readl(host, MMC_REG_STATUS); mxcmci_writel(host, stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE), MMC_REG_STATUS); dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); spin_lock_irqsave(&host->lock, flags); sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio; spin_unlock_irqrestore(&host->lock, flags); if (mxcmci_use_dma(host) && (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, MMC_REG_STATUS); if (sdio_irq) { mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS); mmc_signal_sdio_irq(host->mmc); } if (stat & STATUS_END_CMD_RESP) mxcmci_cmd_done(host, stat); if (mxcmci_use_dma(host) && (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) { del_timer(&host->watchdog); mxcmci_data_done(host, stat); } if (host->default_irq_mask && (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) mmc_detect_change(host->mmc, msecs_to_jiffies(200)); return IRQ_HANDLED; } static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req) { struct mxcmci_host *host = mmc_priv(mmc); unsigned int cmdat = host->cmdat; int error; WARN_ON(host->req != NULL); host->req = req; host->cmdat &= ~CMD_DAT_CONT_INIT; if (host->dma) host->do_dma = 1; if (req->data) { error = mxcmci_setup_data(host, req->data); if (error) { req->cmd->error = error; goto out; } cmdat |= CMD_DAT_CONT_DATA_ENABLE; if (req->data->flags & MMC_DATA_WRITE) cmdat |= CMD_DAT_CONT_WRITE; } error = mxcmci_start_cmd(host, req->cmd, cmdat); out: if (error) mxcmci_finish_request(host, req); } static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) { unsigned int divider; int prescaler = 0; unsigned int clk_in = clk_get_rate(host->clk_per); while 
(prescaler <= 0x800) { for (divider = 1; divider <= 0xF; divider++) { int x; x = (clk_in / (divider + 1)); if (prescaler) x /= (prescaler * 2); if (x <= clk_ios) break; } if (divider < 0x10) break; if (prescaler == 0) prescaler = 1; else prescaler <<= 1; } mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE); dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n", prescaler, divider, clk_in, clk_ios); } static int mxcmci_setup_dma(struct mmc_host *mmc) { struct mxcmci_host *host = mmc_priv(mmc); struct dma_slave_config *config = &host->dma_slave_config; config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS; config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS; config->dst_addr_width = 4; config->src_addr_width = 4; config->dst_maxburst = host->burstlen; config->src_maxburst = host->burstlen; config->device_fc = false; return dmaengine_slave_config(host->dma, config); } static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mxcmci_host *host = mmc_priv(mmc); int burstlen, ret; /* * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0) * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16) */ if (ios->bus_width == MMC_BUS_WIDTH_4) burstlen = 16; else burstlen = 4; if (mxcmci_use_dma(host) && burstlen != host->burstlen) { host->burstlen = burstlen; ret = mxcmci_setup_dma(mmc); if (ret) { dev_err(mmc_dev(host->mmc), "failed to config DMA channel. 
Falling back to PIO\n"); dma_release_channel(host->dma); host->do_dma = 0; host->dma = NULL; } } if (ios->bus_width == MMC_BUS_WIDTH_4) host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; else host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; if (host->power_mode != ios->power_mode) { mxcmci_set_power(host, ios->power_mode, ios->vdd); host->power_mode = ios->power_mode; if (ios->power_mode == MMC_POWER_ON) host->cmdat |= CMD_DAT_CONT_INIT; } if (ios->clock) { mxcmci_set_clk_rate(host, ios->clock); mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); } else { mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK); } host->clock = ios->clock; } static irqreturn_t mxcmci_detect_irq(int irq, void *data) { struct mmc_host *mmc = data; dev_dbg(mmc_dev(mmc), "%s\n", __func__); mmc_detect_change(mmc, msecs_to_jiffies(250)); return IRQ_HANDLED; } static int mxcmci_get_ro(struct mmc_host *mmc) { struct mxcmci_host *host = mmc_priv(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* * If board doesn't support read only detection (no mmc_gpio * context or gpio is invalid), then let the mmc core decide * what to do. */ return mmc_gpio_get_ro(mmc); } static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mxcmci_host *host = mmc_priv(mmc); unsigned long flags; u32 int_cntr; spin_lock_irqsave(&host->lock, flags); host->use_sdio = enable; int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR); if (enable) int_cntr |= INT_SDIO_IRQ_EN; else int_cntr &= ~INT_SDIO_IRQ_EN; mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); } static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) { struct mxcmci_host *mxcmci = mmc_priv(host); /* * MX3 SoCs have a silicon bug which corrupts CRC calculation of * multi-block transfers when connected SDIO peripheral doesn't * drive the BUSY line as required by the specs. * One way to prevent this is to only allow 1-bit transfers. 
*/ if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO) host->caps &= ~MMC_CAP_4_BIT_DATA; else host->caps |= MMC_CAP_4_BIT_DATA; } static bool filter(struct dma_chan *chan, void *param) { struct mxcmci_host *host = param; if (!imx_dma_is_general_purpose(chan)) return false; chan->private = &host->dma_data; return true; } static void mxcmci_watchdog(unsigned long data) { struct mmc_host *mmc = (struct mmc_host *)data; struct mxcmci_host *host = mmc_priv(mmc); struct mmc_request *req = host->req; unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); if (host->dma_dir == DMA_FROM_DEVICE) { dmaengine_terminate_all(host->dma); dev_err(mmc_dev(host->mmc), "%s: read time out (status = 0x%08x)\n", __func__, stat); } else { dev_err(mmc_dev(host->mmc), "%s: write time out (status = 0x%08x)\n", __func__, stat); mxcmci_softreset(host); } /* Mark transfer as erroneus and inform the upper layers */ if (host->data) host->data->error = -ETIMEDOUT; host->req = NULL; host->cmd = NULL; host->data = NULL; mmc_request_done(host->mmc, req); } static const struct mmc_host_ops mxcmci_ops = { .request = mxcmci_request, .set_ios = mxcmci_set_ios, .get_ro = mxcmci_get_ro, .enable_sdio_irq = mxcmci_enable_sdio_irq, .init_card = mxcmci_init_card, }; static int mxcmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct mxcmci_host *host = NULL; struct resource *iores, *r; int ret = 0, irq; bool dat3_card_detect = false; dma_cap_mask_t mask; const struct of_device_id *of_id; struct imxmmc_platform_data *pdata = pdev->dev.platform_data; pr_info("i.MX/MPC512x SDHC driver\n"); of_id = of_match_device(mxcmci_of_match, &pdev->dev); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!iores || irq < 0) return -EINVAL; r = request_mem_region(iores->start, resource_size(iores), pdev->name); if (!r) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out_release_mem; } 
mmc_of_parse(mmc); mmc->ops = &mxcmci_ops; /* For devicetree parsing, the bus width is read from devicetree */ if (pdata) mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; else mmc->caps |= MMC_CAP_SDIO_IRQ; /* MMC core transfer sizes tunable parameters */ mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; host = mmc_priv(mmc); host->base = ioremap(r->start, resource_size(r)); if (!host->base) { ret = -ENOMEM; goto out_free; } if (of_id) { const struct platform_device_id *id_entry = of_id->data; host->devtype = id_entry->driver_data; } else { host->devtype = pdev->id_entry->driver_data; } /* adjust max_segs after devtype detection */ if (!is_mpc512x_mmc(host)) mmc->max_segs = 64; host->mmc = mmc; host->pdata = pdata; spin_lock_init(&host->lock); if (pdata) dat3_card_detect = pdata->dat3_card_detect; else if (!(mmc->caps & MMC_CAP_NONREMOVABLE) && !of_property_read_bool(pdev->dev.of_node, "cd-gpios")) dat3_card_detect = true; mxcmci_init_ocr(host); if (dat3_card_detect) host->default_irq_mask = INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN; else host->default_irq_mask = 0; host->res = r; host->irq = irq; host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(host->clk_ipg)) { ret = PTR_ERR(host->clk_ipg); goto out_iounmap; } host->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(host->clk_per)) { ret = PTR_ERR(host->clk_per); goto out_iounmap; } clk_prepare_enable(host->clk_per); clk_prepare_enable(host->clk_ipg); mxcmci_softreset(host); host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO); if (host->rev_no != 0x400) { ret = -ENODEV; dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. 
aborting.\n", host->rev_no); goto out_clk_put; } mmc->f_min = clk_get_rate(host->clk_per) >> 16; mmc->f_max = clk_get_rate(host->clk_per) >> 1; /* recommended in data sheet */ mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO); mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); if (!host->pdata) { host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); } else { r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (r) { host->dmareq = r->start; host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; host->dma_data.priority = DMA_PRIO_LOW; host->dma_data.dma_request = host->dmareq; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); host->dma = dma_request_channel(mask, filter, host); } } if (host->dma) mmc->max_seg_size = dma_get_max_seg_size( host->dma->device->dev); else dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n"); INIT_WORK(&host->datawork, mxcmci_datawork); ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); if (ret) goto out_free_dma; platform_set_drvdata(pdev, mmc); if (host->pdata && host->pdata->init) { ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq, host->mmc); if (ret) goto out_free_irq; } init_timer(&host->watchdog); host->watchdog.function = &mxcmci_watchdog; host->watchdog.data = (unsigned long)mmc; mmc_add_host(mmc); return 0; out_free_irq: free_irq(host->irq, host); out_free_dma: if (host->dma) dma_release_channel(host->dma); out_clk_put: clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); out_iounmap: iounmap(host->base); out_free: mmc_free_host(mmc); out_release_mem: release_mem_region(iores->start, resource_size(iores)); return ret; } static int mxcmci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct mxcmci_host *host = mmc_priv(mmc); platform_set_drvdata(pdev, NULL); mmc_remove_host(mmc); if (host->vcc) regulator_put(host->vcc); if (host->pdata && host->pdata->exit) host->pdata->exit(&pdev->dev, mmc); free_irq(host->irq, host); 
iounmap(host->base); if (host->dma) dma_release_channel(host->dma); clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); release_mem_region(host->res->start, resource_size(host->res)); mmc_free_host(mmc); return 0; } #ifdef CONFIG_PM static int mxcmci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); int ret = 0; if (mmc) ret = mmc_suspend_host(mmc); clk_disable_unprepare(host->clk_per); clk_disable_unprepare(host->clk_ipg); return ret; } static int mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); int ret = 0; clk_prepare_enable(host->clk_per); clk_prepare_enable(host->clk_ipg); if (mmc) ret = mmc_resume_host(mmc); return ret; } static const struct dev_pm_ops mxcmci_pm_ops = { .suspend = mxcmci_suspend, .resume = mxcmci_resume, }; #endif static struct platform_driver mxcmci_driver = { .probe = mxcmci_probe, .remove = mxcmci_remove, .id_table = mxcmci_devtype, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &mxcmci_pm_ops, #endif .of_match_table = mxcmci_of_match, } }; module_platform_driver(mxcmci_driver); MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:mxc-mmc");
gpl-2.0
BorqsIndia/polaris-kernel
sound/soc/blackfin/bf5xx-tdm.c
2086
8001
/* * File: sound/soc/blackfin/bf5xx-tdm.c * Author: Barry Song <Barry.Song@analog.com> * * Created: Thurs June 04 2009 * Description: Blackfin I2S(TDM) CPU DAI driver * Even though TDM mode can be as part of I2S DAI, but there * are so much difference in configuration and data flow, * it's very ugly to integrate I2S and TDM into a module * * Modified: * Copyright 2009 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/irq.h> #include <asm/portmux.h> #include <linux/mutex.h> #include <linux/gpio.h> #include "bf5xx-sport.h" #include "bf5xx-tdm.h" static int bf5xx_tdm_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { int ret = 0; /* interface format:support TDM,slave mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: break; default: printk(KERN_ERR "%s: Unknown DAI format type\n", __func__); ret = -EINVAL; break; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: break; case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBM_CFS: case SND_SOC_DAIFMT_CBS_CFM: 
ret = -EINVAL; break; default: printk(KERN_ERR "%s: Unknown DAI master type\n", __func__); ret = -EINVAL; break; } return ret; } static int bf5xx_tdm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_tdm_port *bf5xx_tdm = sport_handle->private_data; int ret = 0; bf5xx_tdm->tcr2 &= ~0x1f; bf5xx_tdm->rcr2 &= ~0x1f; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S32_LE: bf5xx_tdm->tcr2 |= 31; bf5xx_tdm->rcr2 |= 31; sport_handle->wdsize = 4; break; /* at present, we only support 32bit transfer */ default: pr_err("not supported PCM format yet\n"); return -EINVAL; break; } if (!bf5xx_tdm->configured) { /* * TX and RX are not independent,they are enabled at the * same time, even if only one side is running. So, we * need to configure both of them at the time when the first * stream is opened. * * CPU DAI:slave mode. */ ret = sport_config_rx(sport_handle, bf5xx_tdm->rcr1, bf5xx_tdm->rcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } ret = sport_config_tx(sport_handle, bf5xx_tdm->tcr1, bf5xx_tdm->tcr2, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); return -EBUSY; } bf5xx_tdm->configured = 1; } return 0; } static void bf5xx_tdm_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_tdm_port *bf5xx_tdm = sport_handle->private_data; /* No active stream, SPORT is allowed to be configured again. 
*/ if (!dai->active) bf5xx_tdm->configured = 0; } static int bf5xx_tdm_set_channel_map(struct snd_soc_dai *dai, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot) { struct sport_device *sport_handle = snd_soc_dai_get_drvdata(dai); struct bf5xx_tdm_port *bf5xx_tdm = sport_handle->private_data; int i; unsigned int slot; unsigned int tx_mapped = 0, rx_mapped = 0; if ((tx_num > BFIN_TDM_DAI_MAX_SLOTS) || (rx_num > BFIN_TDM_DAI_MAX_SLOTS)) return -EINVAL; for (i = 0; i < tx_num; i++) { slot = tx_slot[i]; if ((slot < BFIN_TDM_DAI_MAX_SLOTS) && (!(tx_mapped & (1 << slot)))) { bf5xx_tdm->tx_map[i] = slot; tx_mapped |= 1 << slot; } else return -EINVAL; } for (i = 0; i < rx_num; i++) { slot = rx_slot[i]; if ((slot < BFIN_TDM_DAI_MAX_SLOTS) && (!(rx_mapped & (1 << slot)))) { bf5xx_tdm->rx_map[i] = slot; rx_mapped |= 1 << slot; } else return -EINVAL; } return 0; } #ifdef CONFIG_PM static int bf5xx_tdm_suspend(struct snd_soc_dai *dai) { struct sport_device *sport = snd_soc_dai_get_drvdata(dai); if (dai->playback_active) sport_tx_stop(sport); if (dai->capture_active) sport_rx_stop(sport); /* isolate sync/clock pins from codec while sports resume */ peripheral_free_list(sport->pin_req); return 0; } static int bf5xx_tdm_resume(struct snd_soc_dai *dai) { int ret; struct sport_device *sport = snd_soc_dai_get_drvdata(dai); ret = sport_set_multichannel(sport, 8, 0xFF, 1); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; } ret = sport_config_rx(sport, 0, 0x1F, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; } ret = sport_config_tx(sport, 0, 0x1F, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; } peripheral_request_list(sport->pin_req, "soc-audio"); return 0; } #else #define bf5xx_tdm_suspend NULL #define bf5xx_tdm_resume NULL #endif static const struct snd_soc_dai_ops bf5xx_tdm_dai_ops = { .hw_params = bf5xx_tdm_hw_params, .set_fmt = bf5xx_tdm_set_dai_fmt, .shutdown = bf5xx_tdm_shutdown, .set_channel_map = 
bf5xx_tdm_set_channel_map, }; static struct snd_soc_dai_driver bf5xx_tdm_dai = { .suspend = bf5xx_tdm_suspend, .resume = bf5xx_tdm_resume, .playback = { .channels_min = 2, .channels_max = 8, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S32_LE,}, .capture = { .channels_min = 2, .channels_max = 8, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S32_LE,}, .ops = &bf5xx_tdm_dai_ops, }; static const struct snd_soc_component_driver bf5xx_tdm_component = { .name = "bf5xx-tdm", }; static int bfin_tdm_probe(struct platform_device *pdev) { struct sport_device *sport_handle; int ret; /* configure SPORT for TDM */ sport_handle = sport_init(pdev, 4, 8 * sizeof(u32), sizeof(struct bf5xx_tdm_port)); if (!sport_handle) return -ENODEV; /* SPORT works in TDM mode */ ret = sport_set_multichannel(sport_handle, 8, 0xFF, 1); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } ret = sport_config_rx(sport_handle, 0, 0x1F, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } ret = sport_config_tx(sport_handle, 0, 0x1F, 0, 0); if (ret) { pr_err("SPORT is busy!\n"); ret = -EBUSY; goto sport_config_err; } ret = snd_soc_register_component(&pdev->dev, &bf5xx_tdm_component, &bf5xx_tdm_dai, 1); if (ret) { pr_err("Failed to register DAI: %d\n", ret); goto sport_config_err; } return 0; sport_config_err: sport_done(sport_handle); return ret; } static int bfin_tdm_remove(struct platform_device *pdev) { struct sport_device *sport_handle = platform_get_drvdata(pdev); snd_soc_unregister_component(&pdev->dev); sport_done(sport_handle); return 0; } static struct platform_driver bfin_tdm_driver = { .probe = bfin_tdm_probe, .remove = bfin_tdm_remove, .driver = { .name = "bfin-tdm", .owner = THIS_MODULE, }, }; module_platform_driver(bfin_tdm_driver); /* Module information */ MODULE_AUTHOR("Barry Song"); MODULE_DESCRIPTION("TDM driver for ADI Blackfin"); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_hardkernel_odroidc1
drivers/char/hw_random/timeriomem-rng.c
2086
5858
/* * drivers/char/hw_random/timeriomem-rng.c * * Copyright (C) 2009 Alexander Clouter <alex@digriz.org.uk> * * Derived from drivers/char/hw_random/omap-rng.c * Copyright 2005 (c) MontaVista Software, Inc. * Author: Deepak Saxena <dsaxena@plexity.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Overview: * This driver is useful for platforms that have an IO range that provides * periodic random data from a single IO memory address. All the platform * has to do is provide the address and 'wait time' that new data becomes * available. * * TODO: add support for reading sizes other than 32bits and masking */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/timeriomem-rng.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/completion.h> struct timeriomem_rng_private_data { void __iomem *io_base; unsigned int expires; unsigned int period; unsigned int present:1; struct timer_list timer; struct completion completion; struct hwrng timeriomem_rng_ops; }; #define to_rng_priv(rng) \ ((struct timeriomem_rng_private_data *)rng->priv) /* * have data return 1, however return 0 if we have nothing */ static int timeriomem_rng_data_present(struct hwrng *rng, int wait) { struct timeriomem_rng_private_data *priv = to_rng_priv(rng); if (!wait || priv->present) return priv->present; wait_for_completion(&priv->completion); return 1; } static int timeriomem_rng_data_read(struct hwrng *rng, u32 *data) { struct timeriomem_rng_private_data *priv = to_rng_priv(rng); unsigned long cur; s32 delay; *data = readl(priv->io_base); cur = jiffies; delay = cur - priv->expires; delay = priv->period - (delay % priv->period); priv->expires = cur + delay; priv->present = 
0; INIT_COMPLETION(priv->completion); mod_timer(&priv->timer, priv->expires); return 4; } static void timeriomem_rng_trigger(unsigned long data) { struct timeriomem_rng_private_data *priv = (struct timeriomem_rng_private_data *)data; priv->present = 1; complete(&priv->completion); } static int timeriomem_rng_probe(struct platform_device *pdev) { struct timeriomem_rng_data *pdata = pdev->dev.platform_data; struct timeriomem_rng_private_data *priv; struct resource *res; int err = 0; int period; if (!pdev->dev.of_node && !pdata) { dev_err(&pdev->dev, "timeriomem_rng_data is missing\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (res->start % 4 != 0 || resource_size(res) != 4) { dev_err(&pdev->dev, "address must be four bytes wide and aligned\n"); return -EINVAL; } /* Allocate memory for the device structure (and zero it) */ priv = kzalloc(sizeof(struct timeriomem_rng_private_data), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "failed to allocate device structure.\n"); return -ENOMEM; } platform_set_drvdata(pdev, priv); if (pdev->dev.of_node) { int i; if (!of_property_read_u32(pdev->dev.of_node, "period", &i)) period = i; else { dev_err(&pdev->dev, "missing period\n"); err = -EINVAL; goto out_free; } } else period = pdata->period; priv->period = usecs_to_jiffies(period); if (priv->period < 1) { dev_err(&pdev->dev, "period is less than one jiffy\n"); err = -EINVAL; goto out_free; } priv->expires = jiffies; priv->present = 1; init_completion(&priv->completion); complete(&priv->completion); setup_timer(&priv->timer, timeriomem_rng_trigger, (unsigned long)priv); priv->timeriomem_rng_ops.name = dev_name(&pdev->dev); priv->timeriomem_rng_ops.data_present = timeriomem_rng_data_present; priv->timeriomem_rng_ops.data_read = timeriomem_rng_data_read; priv->timeriomem_rng_ops.priv = (unsigned long)priv; if (!request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev))) { dev_err(&pdev->dev, "request_mem_region 
failed\n"); err = -EBUSY; goto out_timer; } priv->io_base = ioremap(res->start, resource_size(res)); if (priv->io_base == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); err = -EIO; goto out_release_io; } err = hwrng_register(&priv->timeriomem_rng_ops); if (err) { dev_err(&pdev->dev, "problem registering\n"); goto out; } dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n", priv->io_base, period); return 0; out: iounmap(priv->io_base); out_release_io: release_mem_region(res->start, resource_size(res)); out_timer: del_timer_sync(&priv->timer); out_free: platform_set_drvdata(pdev, NULL); kfree(priv); return err; } static int timeriomem_rng_remove(struct platform_device *pdev) { struct timeriomem_rng_private_data *priv = platform_get_drvdata(pdev); struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwrng_unregister(&priv->timeriomem_rng_ops); del_timer_sync(&priv->timer); iounmap(priv->io_base); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); kfree(priv); return 0; } static const struct of_device_id timeriomem_rng_match[] = { { .compatible = "timeriomem_rng" }, {}, }; MODULE_DEVICE_TABLE(of, timeriomem_rng_match); static struct platform_driver timeriomem_rng_driver = { .driver = { .name = "timeriomem_rng", .owner = THIS_MODULE, .of_match_table = timeriomem_rng_match, }, .probe = timeriomem_rng_probe, .remove = timeriomem_rng_remove, }; module_platform_driver(timeriomem_rng_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>"); MODULE_DESCRIPTION("Timer IOMEM H/W RNG driver");
gpl-2.0
imasaru/kernel_zte_cymbalT
drivers/staging/comedi/drivers/usbduxfast.c
2086
42990
/* * Copyright (C) 2004 Bernd Porr, Bernd.Porr@f2s.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * I must give credit here to Chris Baugher who * wrote the driver for AT-MIO-16d. I used some parts of this * driver. I also must give credits to David Brownell * who supported me with the USB development. * * Bernd Porr * * * Revision history: * 0.9: Dropping the first data packet which seems to be from the last transfer. * Buffer overflows in the FX2 are handed over to comedi. * 0.92: Dropping now 4 packets. The quad buffer has to be emptied. * Added insn command basically for testing. Sample rate is * 1MHz/16ch=62.5kHz * 0.99: Ian Abbott pointed out a bug which has been corrected. Thanks! * 0.99a: added external trigger. 
* 1.00: added firmware kernel request to the driver which fixed * udev coldplug problem */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/fcntl.h> #include <linux/compiler.h> #include "comedi_fc.h" #include "../comedidev.h" /* * timeout for the USB-transfer */ #define EZTIMEOUT 30 /* * constants for "firmware" upload and download */ #define FIRMWARE "usbduxfast_firmware.bin" #define USBDUXFASTSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 /* * internal addresses of the 8051 processor */ #define USBDUXFASTSUB_CPUCS 0xE600 /* * max lenghth of the transfer-buffer for software upload */ #define TB_LEN 0x2000 /* * input endpoint number */ #define BULKINEP 6 /* * endpoint for the A/D channellist: bulk OUT */ #define CHANNELLISTEP 4 /* * number of channels */ #define NUMCHANNELS 32 /* * size of the waveform descriptor */ #define WAVESIZE 0x20 /* * size of one A/D value */ #define SIZEADIN (sizeof(int16_t)) /* * size of the input-buffer IN BYTES */ #define SIZEINBUF 512 /* * 16 bytes */ #define SIZEINSNBUF 512 /* * size of the buffer for the dux commands in bytes */ #define SIZEOFDUXBUFFER 256 /* * number of in-URBs which receive the data: min=5 */ #define NUMOFINBUFFERSHIGH 10 /* * total number of usbduxfast devices */ #define NUMUSBDUXFAST 16 /* * analogue in subdevice */ #define SUBDEV_AD 0 /* * min delay steps for more than one channel * basically when the mux gives up ;-) * * steps at 30MHz in the FX2 */ #define MIN_SAMPLING_PERIOD 9 /* * max number of 1/30MHz delay steps */ #define MAX_SAMPLING_PERIOD 500 /* * number of received packets to ignore before we start handing data * over to comedi, it's quad buffering and we have to ignore 4 packets */ #define PACKETS_TO_IGNORE 4 /* * comedi constants */ static const struct comedi_lrange 
range_usbduxfast_ai_range = { 2, {BIP_RANGE(0.75), BIP_RANGE(0.5)} }; /* * private structure of one subdevice * * this is the structure which holds all the data of this driver * one sub device just now: A/D */ struct usbduxfastsub_s { int attached; /* is attached? */ int probed; /* is it associated with a subdevice? */ struct usb_device *usbdev; /* pointer to the usb-device */ struct urb *urbIn; /* BULK-transfer handling: urb */ int8_t *transfer_buffer; int16_t *insnBuffer; /* input buffer for single insn */ int ifnum; /* interface number */ struct usb_interface *interface; /* interface structure */ /* comedi device for the interrupt context */ struct comedi_device *comedidev; short int ai_cmd_running; /* asynchronous command is running */ short int ai_continous; /* continous acquisition */ long int ai_sample_count; /* number of samples to acquire */ uint8_t *dux_commands; /* commands */ int ignore; /* counter which ignores the first buffers */ struct semaphore sem; }; /* * The pointer to the private usb-data of the driver * is also the private data for the comedi-device. * This has to be global as the usb subsystem needs * global variables. The other reason is that this * structure must be there _before_ any comedi * command is issued. The usb subsystem must be * initialised before comedi can access it. 
*/ static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST]; static DEFINE_SEMAPHORE(start_stop_sem); /* * bulk transfers to usbduxfast */ #define SENDADCOMMANDS 0 #define SENDINITEP6 1 static int send_dux_commands(struct usbduxfastsub_s *udfs, int cmd_type) { int tmp, nsent; udfs->dux_commands[0] = cmd_type; #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast: dux_commands: ", udfs->comedidev->minor); for (tmp = 0; tmp < SIZEOFDUXBUFFER; tmp++) printk(" %02x", udfs->dux_commands[tmp]); printk("\n"); #endif tmp = usb_bulk_msg(udfs->usbdev, usb_sndbulkpipe(udfs->usbdev, CHANNELLISTEP), udfs->dux_commands, SIZEOFDUXBUFFER, &nsent, 10000); if (tmp < 0) dev_err(&udfs->interface->dev, "could not transmit dux_commands to the usb-device, err=%d\n", tmp); return tmp; } /* * Stops the data acquision. * It should be safe to call this function from any context. */ static int usbduxfastsub_unlink_InURBs(struct usbduxfastsub_s *udfs) { int j = 0; int err = 0; if (udfs && udfs->urbIn) { udfs->ai_cmd_running = 0; /* waits until a running transfer is over */ usb_kill_urb(udfs->urbIn); j = 0; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi: usbduxfast: unlinked InURB: res=%d\n", j); #endif return err; } /* * This will stop a running acquisition operation. * Is called from within this driver from both the * interrupt context and from comedi. */ static int usbduxfast_ai_stop(struct usbduxfastsub_s *udfs, int do_unlink) { int ret = 0; if (!udfs) { pr_err("%s: udfs=NULL!\n", __func__); return -EFAULT; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi: usbduxfast_ai_stop\n"); #endif udfs->ai_cmd_running = 0; if (do_unlink) /* stop aquistion */ ret = usbduxfastsub_unlink_InURBs(udfs); return ret; } /* * This will cancel a running acquisition operation. * This is called by comedi but never from inside the driver. 
*/ static int usbduxfast_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbduxfastsub_s *udfs; int ret; /* force unlink of all urbs */ #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi: usbduxfast_ai_cancel\n"); #endif udfs = dev->private; if (!udfs) { dev_err(dev->class_dev, "%s: udfs=NULL\n", __func__); return -EFAULT; } down(&udfs->sem); if (!udfs->probed) { up(&udfs->sem); return -ENODEV; } /* unlink */ ret = usbduxfast_ai_stop(udfs, 1); up(&udfs->sem); return ret; } /* * analogue IN * interrupt service routine */ static void usbduxfastsub_ai_Irq(struct urb *urb) { int n, err; struct usbduxfastsub_s *udfs; struct comedi_device *this_comedidev; struct comedi_subdevice *s; /* sanity checks - is the urb there? */ if (!urb) { pr_err("ao int-handler called with urb=NULL!\n"); return; } /* the context variable points to the subdevice */ this_comedidev = urb->context; if (!this_comedidev) { pr_err("urb context is a NULL pointer!\n"); return; } /* the private structure of the subdevice is usbduxfastsub_s */ udfs = this_comedidev->private; if (!udfs) { pr_err("private of comedi subdev is a NULL pointer!\n"); return; } /* are we running a command? 
*/ if (unlikely(!udfs->ai_cmd_running)) { /* * not running a command * do not continue execution if no asynchronous command * is running in particular not resubmit */ return; } if (unlikely(!udfs->attached)) { /* no comedi device there */ return; } /* subdevice which is the AD converter */ s = &this_comedidev->subdevices[SUBDEV_AD]; /* first we test if something unusual has just happened */ switch (urb->status) { case 0: break; /* * happens after an unlink command or when the device * is plugged out */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* tell this comedi */ s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(udfs->comedidev, s); /* stop the transfer w/o unlink */ usbduxfast_ai_stop(udfs, 0); return; default: pr_err("non-zero urb status received in ai intr context: %d\n", urb->status); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(udfs->comedidev, s); usbduxfast_ai_stop(udfs, 0); return; } if (!udfs->ignore) { if (!udfs->ai_continous) { /* not continuous, fixed number of samples */ n = urb->actual_length / sizeof(uint16_t); if (unlikely(udfs->ai_sample_count < n)) { /* * we have send only a fraction of the bytes * received */ cfc_write_array_to_buffer(s, urb->transfer_buffer, udfs->ai_sample_count * sizeof(uint16_t)); usbduxfast_ai_stop(udfs, 0); /* tell comedi that the acquistion is over */ s->async->events |= COMEDI_CB_EOA; comedi_event(udfs->comedidev, s); return; } udfs->ai_sample_count -= n; } /* write the full buffer to comedi */ err = cfc_write_array_to_buffer(s, urb->transfer_buffer, urb->actual_length); if (unlikely(err == 0)) { /* buffer overflow */ usbduxfast_ai_stop(udfs, 0); return; } /* tell comedi that data is there */ comedi_event(udfs->comedidev, s); } else { /* ignore this packet */ udfs->ignore--; } /* * command is still running * resubmit urb for BULK transfer */ urb->dev = udfs->usbdev; urb->status = 0; err = usb_submit_urb(urb, 
GFP_ATOMIC); if (err < 0) { dev_err(&urb->dev->dev, "urb resubm failed: %d", err); s->async->events |= COMEDI_CB_EOA; s->async->events |= COMEDI_CB_ERROR; comedi_event(udfs->comedidev, s); usbduxfast_ai_stop(udfs, 0); } } static int usbduxfastsub_start(struct usbduxfastsub_s *udfs) { int ret; unsigned char *local_transfer_buffer; local_transfer_buffer = kmalloc(1, GFP_KERNEL); if (!local_transfer_buffer) return -ENOMEM; /* 7f92 to zero */ *local_transfer_buffer = 0; /* bRequest, "Firmware" */ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, VENDOR_DIR_OUT, /* bmRequestType */ USBDUXFASTSUB_CPUCS, /* Value */ 0x0000, /* Index */ /* address of the transfer buffer */ local_transfer_buffer, 1, /* Length */ EZTIMEOUT); /* Timeout */ if (ret < 0) dev_err(&udfs->interface->dev, "control msg failed (start)\n"); kfree(local_transfer_buffer); return ret; } static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs) { int ret; unsigned char *local_transfer_buffer; local_transfer_buffer = kmalloc(1, GFP_KERNEL); if (!local_transfer_buffer) return -ENOMEM; /* 7f92 to one */ *local_transfer_buffer = 1; /* bRequest, "Firmware" */ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), USBDUXFASTSUB_FIRMWARE, VENDOR_DIR_OUT, /* bmRequestType */ USBDUXFASTSUB_CPUCS, /* Value */ 0x0000, /* Index */ local_transfer_buffer, 1, /* Length */ EZTIMEOUT); /* Timeout */ if (ret < 0) dev_err(&udfs->interface->dev, "control msg failed (stop)\n"); kfree(local_transfer_buffer); return ret; } static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs, unsigned char *local_transfer_buffer, unsigned int startAddr, unsigned int len) { int ret; #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi: usbduxfast: uploading %d bytes", len); printk(KERN_DEBUG " to addr %d, first byte=%d.\n", startAddr, local_transfer_buffer[0]); #endif /* brequest, firmware */ ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0), 
USBDUXFASTSUB_FIRMWARE, VENDOR_DIR_OUT, /* bmRequestType */ startAddr, /* value */ 0x0000, /* index */ /* our local safe buffer */ local_transfer_buffer, len, /* length */ EZTIMEOUT); /* timeout */ #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast: result=%d\n", ret); #endif if (ret < 0) { dev_err(&udfs->interface->dev, "uppload failed\n"); return ret; } return 0; } static int usbduxfastsub_submit_InURBs(struct usbduxfastsub_s *udfs) { int ret; if (!udfs) return -EFAULT; usb_fill_bulk_urb(udfs->urbIn, udfs->usbdev, usb_rcvbulkpipe(udfs->usbdev, BULKINEP), udfs->transfer_buffer, SIZEINBUF, usbduxfastsub_ai_Irq, udfs->comedidev); #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast: submitting in-urb: " "0x%p,0x%p\n", udfs->comedidev->minor, udfs->urbIn->context, udfs->urbIn->dev); #endif ret = usb_submit_urb(udfs->urbIn, GFP_ATOMIC); if (ret) { dev_err(&udfs->interface->dev, "ai: usb_submit_urb error %d\n", ret); return ret; } return 0; } static int usbduxfast_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct usbduxfastsub_s *udfs = dev->private; int err = 0; long int steps, tmp; int minSamplPer; if (!udfs->probed) return -ENODEV; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT | TRIG_INT); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_FOLLOW | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->start_src); err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); err |= cfc_check_trigger_is_unique(cmd->convert_src); err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ /* can't have 
external stop and start triggers at once */ if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT) err |= -EINVAL; if (err) return 2; /* Step 3: check if arguments are trivially valid */ if (cmd->start_src == TRIG_NOW) err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); if (!cmd->chanlist_len) err |= -EINVAL; err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->chanlist_len == 1) minSamplPer = 1; else minSamplPer = MIN_SAMPLING_PERIOD; if (cmd->convert_src == TRIG_TIMER) { steps = cmd->convert_arg * 30; if (steps < (minSamplPer * 1000)) steps = minSamplPer * 1000; if (steps > (MAX_SAMPLING_PERIOD * 1000)) steps = MAX_SAMPLING_PERIOD * 1000; /* calc arg again */ tmp = steps / 30; err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp); } if (cmd->scan_begin_src == TRIG_TIMER) err |= -EINVAL; /* stop source */ switch (cmd->stop_src) { case TRIG_COUNT: err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1); break; case TRIG_NONE: err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); break; /* * TRIG_EXT doesn't care since it doesn't trigger * off a numbered channel */ default: break; } if (err) return 3; /* step 4: fix up any arguments */ return 0; } static int usbduxfast_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { int ret; struct usbduxfastsub_s *udfs = dev->private; if (!udfs) return -EFAULT; down(&udfs->sem); if (!udfs->probed) { up(&udfs->sem); return -ENODEV; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast_ai_inttrig\n", dev->minor); #endif if (trignum != 0) { dev_err(dev->class_dev, "%s: invalid trignum\n", __func__); up(&udfs->sem); return -EINVAL; } if (!udfs->ai_cmd_running) { udfs->ai_cmd_running = 1; ret = usbduxfastsub_submit_InURBs(udfs); if (ret < 0) { dev_err(dev->class_dev, "%s: urbSubmit: err=%d\n", __func__, ret); udfs->ai_cmd_running = 0; up(&udfs->sem); return ret; } s->async->inttrig = NULL; } else { dev_err(dev->class_dev, "ai_inttrig but acqu is 
already running\n"); } up(&udfs->sem); return 1; } /* * offsets for the GPIF bytes * the first byte is the command byte */ #define LENBASE (1+0x00) #define OPBASE (1+0x08) #define OUTBASE (1+0x10) #define LOGBASE (1+0x18) static int usbduxfast_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; unsigned int chan, gain, rngmask = 0xff; int i, j, ret; struct usbduxfastsub_s *udfs; int result; long steps, steps_tmp; #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast_ai_cmd\n", dev->minor); #endif udfs = dev->private; if (!udfs) return -EFAULT; down(&udfs->sem); if (!udfs->probed) { up(&udfs->sem); return -ENODEV; } if (udfs->ai_cmd_running) { dev_err(dev->class_dev, "ai_cmd not possible. Another ai_cmd is running.\n"); up(&udfs->sem); return -EBUSY; } /* set current channel of the running acquisition to zero */ s->async->cur_chan = 0; /* * ignore the first buffers from the device if there * is an error condition */ udfs->ignore = PACKETS_TO_IGNORE; if (cmd->chanlist_len > 0) { gain = CR_RANGE(cmd->chanlist[0]); for (i = 0; i < cmd->chanlist_len; ++i) { chan = CR_CHAN(cmd->chanlist[i]); if (chan != i) { dev_err(dev->class_dev, "cmd is accepting only consecutive channels.\n"); up(&udfs->sem); return -EINVAL; } if ((gain != CR_RANGE(cmd->chanlist[i])) && (cmd->chanlist_len > 3)) { dev_err(dev->class_dev, "the gain must be the same for all channels.\n"); up(&udfs->sem); return -EINVAL; } if (i >= NUMCHANNELS) { dev_err(dev->class_dev, "channel list too long\n"); break; } } } steps = 0; if (cmd->scan_begin_src == TRIG_TIMER) { dev_err(dev->class_dev, "scan_begin_src==TRIG_TIMER not valid.\n"); up(&udfs->sem); return -EINVAL; } if (cmd->convert_src == TRIG_TIMER) steps = (cmd->convert_arg * 30) / 1000; if ((steps < MIN_SAMPLING_PERIOD) && (cmd->chanlist_len != 1)) { dev_err(dev->class_dev, "ai_cmd: steps=%ld, scan_begin_arg=%d. 
Not properly tested by cmdtest?\n", steps, cmd->scan_begin_arg); up(&udfs->sem); return -EINVAL; } if (steps > MAX_SAMPLING_PERIOD) { dev_err(dev->class_dev, "ai_cmd: sampling rate too low.\n"); up(&udfs->sem); return -EINVAL; } if ((cmd->start_src == TRIG_EXT) && (cmd->chanlist_len != 1) && (cmd->chanlist_len != 16)) { dev_err(dev->class_dev, "ai_cmd: TRIG_EXT only with 1 or 16 channels possible.\n"); up(&udfs->sem); return -EINVAL; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast: steps=%ld, convert_arg=%u\n", dev->minor, steps, cmd->convert_arg); #endif switch (cmd->chanlist_len) { case 1: /* * one channel */ if (CR_RANGE(cmd->chanlist[0]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* * for external trigger: looping in this state until * the RDY0 pin becomes zero */ /* we loop here until ready has been set */ if (cmd->start_src == TRIG_EXT) { /* branch back to state 0 */ udfs->dux_commands[LENBASE + 0] = 0x01; /* deceision state w/o data */ udfs->dux_commands[OPBASE + 0] = 0x01; udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask; /* RDY0 = 0 */ udfs->dux_commands[LOGBASE + 0] = 0x00; } else { /* we just proceed to state 1 */ udfs->dux_commands[LENBASE + 0] = 1; udfs->dux_commands[OPBASE + 0] = 0; udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 0] = 0; } if (steps < MIN_SAMPLING_PERIOD) { /* for fast single channel aqu without mux */ if (steps <= 1) { /* * we just stay here at state 1 and rexecute * the same state this gives us 30MHz sampling * rate */ /* branch back to state 1 */ udfs->dux_commands[LENBASE + 1] = 0x89; /* deceision state with data */ udfs->dux_commands[OPBASE + 1] = 0x03; udfs->dux_commands[OUTBASE + 1] = 0xFF & rngmask; /* doesn't matter */ udfs->dux_commands[LOGBASE + 1] = 0xFF; } else { /* * we loop through two states: data and delay * max rate is 15MHz */ udfs->dux_commands[LENBASE + 1] = steps - 1; /* data */ udfs->dux_commands[OPBASE + 1] = 0x02; udfs->dux_commands[OUTBASE + 1] = 0xFF 
& rngmask; /* doesn't matter */ udfs->dux_commands[LOGBASE + 1] = 0; /* branch back to state 1 */ udfs->dux_commands[LENBASE + 2] = 0x09; /* deceision state w/o data */ udfs->dux_commands[OPBASE + 2] = 0x01; udfs->dux_commands[OUTBASE + 2] = 0xFF & rngmask; /* doesn't matter */ udfs->dux_commands[LOGBASE + 2] = 0xFF; } } else { /* * we loop through 3 states: 2x delay and 1x data * this gives a min sampling rate of 60kHz */ /* we have 1 state with duration 1 */ steps = steps - 1; /* do the first part of the delay */ udfs->dux_commands[LENBASE + 1] = steps / 2; udfs->dux_commands[OPBASE + 1] = 0; udfs->dux_commands[OUTBASE + 1] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 1] = 0; /* and the second part */ udfs->dux_commands[LENBASE + 2] = steps - steps / 2; udfs->dux_commands[OPBASE + 2] = 0; udfs->dux_commands[OUTBASE + 2] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 2] = 0; /* get the data and branch back */ /* branch back to state 1 */ udfs->dux_commands[LENBASE + 3] = 0x09; /* deceision state w data */ udfs->dux_commands[OPBASE + 3] = 0x03; udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask; /* doesn't matter */ udfs->dux_commands[LOGBASE + 3] = 0xFF; } break; case 2: /* * two channels * commit data to the FIFO */ if (CR_RANGE(cmd->chanlist[0]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; udfs->dux_commands[LENBASE + 0] = 1; /* data */ udfs->dux_commands[OPBASE + 0] = 0x02; udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 0] = 0; /* we have 1 state with duration 1: state 0 */ steps_tmp = steps - 1; if (CR_RANGE(cmd->chanlist[1]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* do the first part of the delay */ udfs->dux_commands[LENBASE + 1] = steps_tmp / 2; udfs->dux_commands[OPBASE + 1] = 0; /* count */ udfs->dux_commands[OUTBASE + 1] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 1] = 0; /* and the second part */ udfs->dux_commands[LENBASE + 2] = steps_tmp - steps_tmp / 2; udfs->dux_commands[OPBASE + 2] = 0; 
udfs->dux_commands[OUTBASE + 2] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 2] = 0; udfs->dux_commands[LENBASE + 3] = 1; /* data */ udfs->dux_commands[OPBASE + 3] = 0x02; udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 3] = 0; /* * we have 2 states with duration 1: step 6 and * the IDLE state */ steps_tmp = steps - 2; if (CR_RANGE(cmd->chanlist[0]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* do the first part of the delay */ udfs->dux_commands[LENBASE + 4] = steps_tmp / 2; udfs->dux_commands[OPBASE + 4] = 0; /* reset */ udfs->dux_commands[OUTBASE + 4] = (0xFF - 0x02) & rngmask; udfs->dux_commands[LOGBASE + 4] = 0; /* and the second part */ udfs->dux_commands[LENBASE + 5] = steps_tmp - steps_tmp / 2; udfs->dux_commands[OPBASE + 5] = 0; udfs->dux_commands[OUTBASE + 5] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 5] = 0; udfs->dux_commands[LENBASE + 6] = 1; udfs->dux_commands[OPBASE + 6] = 0; udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 6] = 0; break; case 3: /* * three channels */ for (j = 0; j < 1; j++) { if (CR_RANGE(cmd->chanlist[j]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* * commit data to the FIFO and do the first part * of the delay */ udfs->dux_commands[LENBASE + j * 2] = steps / 2; /* data */ udfs->dux_commands[OPBASE + j * 2] = 0x02; /* no change */ udfs->dux_commands[OUTBASE + j * 2] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + j * 2] = 0; if (CR_RANGE(cmd->chanlist[j + 1]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* do the second part of the delay */ udfs->dux_commands[LENBASE + j * 2 + 1] = steps - steps / 2; /* no data */ udfs->dux_commands[OPBASE + j * 2 + 1] = 0; /* count */ udfs->dux_commands[OUTBASE + j * 2 + 1] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + j * 2 + 1] = 0; } /* 2 steps with duration 1: the idele step and step 6: */ steps_tmp = steps - 2; /* commit data to the FIFO and do the first part of the delay */ udfs->dux_commands[LENBASE + 4] = 
steps_tmp / 2; /* data */ udfs->dux_commands[OPBASE + 4] = 0x02; udfs->dux_commands[OUTBASE + 4] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 4] = 0; if (CR_RANGE(cmd->chanlist[0]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* do the second part of the delay */ udfs->dux_commands[LENBASE + 5] = steps_tmp - steps_tmp / 2; /* no data */ udfs->dux_commands[OPBASE + 5] = 0; /* reset */ udfs->dux_commands[OUTBASE + 5] = (0xFF - 0x02) & rngmask; udfs->dux_commands[LOGBASE + 5] = 0; udfs->dux_commands[LENBASE + 6] = 1; udfs->dux_commands[OPBASE + 6] = 0; udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 6] = 0; case 16: if (CR_RANGE(cmd->chanlist[0]) > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; if (cmd->start_src == TRIG_EXT) { /* * we loop here until ready has been set */ /* branch back to state 0 */ udfs->dux_commands[LENBASE + 0] = 0x01; /* deceision state w/o data */ udfs->dux_commands[OPBASE + 0] = 0x01; /* reset */ udfs->dux_commands[OUTBASE + 0] = (0xFF - 0x02) & rngmask; /* RDY0 = 0 */ udfs->dux_commands[LOGBASE + 0] = 0x00; } else { /* * we just proceed to state 1 */ /* 30us reset pulse */ udfs->dux_commands[LENBASE + 0] = 255; udfs->dux_commands[OPBASE + 0] = 0; /* reset */ udfs->dux_commands[OUTBASE + 0] = (0xFF - 0x02) & rngmask; udfs->dux_commands[LOGBASE + 0] = 0; } /* commit data to the FIFO */ udfs->dux_commands[LENBASE + 1] = 1; /* data */ udfs->dux_commands[OPBASE + 1] = 0x02; udfs->dux_commands[OUTBASE + 1] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 1] = 0; /* we have 2 states with duration 1 */ steps = steps - 2; /* do the first part of the delay */ udfs->dux_commands[LENBASE + 2] = steps / 2; udfs->dux_commands[OPBASE + 2] = 0; udfs->dux_commands[OUTBASE + 2] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 2] = 0; /* and the second part */ udfs->dux_commands[LENBASE + 3] = steps - steps / 2; udfs->dux_commands[OPBASE + 3] = 0; udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 3] 
= 0; /* branch back to state 1 */ udfs->dux_commands[LENBASE + 4] = 0x09; /* deceision state w/o data */ udfs->dux_commands[OPBASE + 4] = 0x01; udfs->dux_commands[OUTBASE + 4] = 0xFF & rngmask; /* doesn't matter */ udfs->dux_commands[LOGBASE + 4] = 0xFF; break; default: dev_err(dev->class_dev, "unsupported combination of channels\n"); up(&udfs->sem); return -EFAULT; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi %d: sending commands to the usb device\n", dev->minor); #endif /* 0 means that the AD commands are sent */ result = send_dux_commands(udfs, SENDADCOMMANDS); if (result < 0) { dev_err(dev->class_dev, "adc command could not be submitted. Aborting...\n"); up(&udfs->sem); return result; } if (cmd->stop_src == TRIG_COUNT) { udfs->ai_sample_count = cmd->stop_arg * cmd->scan_end_arg; if (udfs->ai_sample_count < 1) { dev_err(dev->class_dev, "(cmd->stop_arg)*(cmd->scan_end_arg)<1, aborting.\n"); up(&udfs->sem); return -EFAULT; } udfs->ai_continous = 0; } else { /* continous acquisition */ udfs->ai_continous = 1; udfs->ai_sample_count = 0; } if ((cmd->start_src == TRIG_NOW) || (cmd->start_src == TRIG_EXT)) { /* enable this acquisition operation */ udfs->ai_cmd_running = 1; ret = usbduxfastsub_submit_InURBs(udfs); if (ret < 0) { udfs->ai_cmd_running = 0; /* fixme: unlink here?? */ up(&udfs->sem); return ret; } s->async->inttrig = NULL; } else { /* * TRIG_INT * don't enable the acquision operation * wait for an internal signal */ s->async->inttrig = usbduxfast_ai_inttrig; } up(&udfs->sem); return 0; } /* * Mode 0 is used to get a single conversion on demand. 
*/ static int usbduxfast_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, j, n, actual_length; int chan, range, rngmask; int err; struct usbduxfastsub_s *udfs; udfs = dev->private; if (!udfs) { dev_err(dev->class_dev, "%s: no usb dev.\n", __func__); return -ENODEV; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: ai_insn_read, insn->n=%d, " "insn->subdev=%d\n", dev->minor, insn->n, insn->subdev); #endif down(&udfs->sem); if (!udfs->probed) { up(&udfs->sem); return -ENODEV; } if (udfs->ai_cmd_running) { dev_err(dev->class_dev, "ai_insn_read not possible. Async Command is running.\n"); up(&udfs->sem); return -EBUSY; } /* sample one channel */ chan = CR_CHAN(insn->chanspec); range = CR_RANGE(insn->chanspec); /* set command for the first channel */ if (range > 0) rngmask = 0xff - 0x04; else rngmask = 0xff; /* commit data to the FIFO */ udfs->dux_commands[LENBASE + 0] = 1; /* data */ udfs->dux_commands[OPBASE + 0] = 0x02; udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 0] = 0; /* do the first part of the delay */ udfs->dux_commands[LENBASE + 1] = 12; udfs->dux_commands[OPBASE + 1] = 0; udfs->dux_commands[OUTBASE + 1] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 1] = 0; udfs->dux_commands[LENBASE + 2] = 1; udfs->dux_commands[OPBASE + 2] = 0; udfs->dux_commands[OUTBASE + 2] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 2] = 0; udfs->dux_commands[LENBASE + 3] = 1; udfs->dux_commands[OPBASE + 3] = 0; udfs->dux_commands[OUTBASE + 3] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 3] = 0; udfs->dux_commands[LENBASE + 4] = 1; udfs->dux_commands[OPBASE + 4] = 0; udfs->dux_commands[OUTBASE + 4] = 0xFE & rngmask; udfs->dux_commands[LOGBASE + 4] = 0; /* second part */ udfs->dux_commands[LENBASE + 5] = 12; udfs->dux_commands[OPBASE + 5] = 0; udfs->dux_commands[OUTBASE + 5] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 5] = 0; udfs->dux_commands[LENBASE + 6] = 1; 
udfs->dux_commands[OPBASE + 6] = 0; udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask; udfs->dux_commands[LOGBASE + 0] = 0; #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi %d: sending commands to the usb device\n", dev->minor); #endif /* 0 means that the AD commands are sent */ err = send_dux_commands(udfs, SENDADCOMMANDS); if (err < 0) { dev_err(dev->class_dev, "adc command could not be submitted. Aborting...\n"); up(&udfs->sem); return err; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi%d: usbduxfast: submitting in-urb: " "0x%p,0x%p\n", udfs->comedidev->minor, udfs->urbIn->context, udfs->urbIn->dev); #endif for (i = 0; i < PACKETS_TO_IGNORE; i++) { err = usb_bulk_msg(udfs->usbdev, usb_rcvbulkpipe(udfs->usbdev, BULKINEP), udfs->transfer_buffer, SIZEINBUF, &actual_length, 10000); if (err < 0) { dev_err(dev->class_dev, "insn timeout. No data.\n"); up(&udfs->sem); return err; } } /* data points */ for (i = 0; i < insn->n;) { err = usb_bulk_msg(udfs->usbdev, usb_rcvbulkpipe(udfs->usbdev, BULKINEP), udfs->transfer_buffer, SIZEINBUF, &actual_length, 10000); if (err < 0) { dev_err(dev->class_dev, "insn data error: %d\n", err); up(&udfs->sem); return err; } n = actual_length / sizeof(uint16_t); if ((n % 16) != 0) { dev_err(dev->class_dev, "insn data packet corrupted.\n"); up(&udfs->sem); return -EINVAL; } for (j = chan; (j < n) && (i < insn->n); j = j + 16) { data[i] = ((uint16_t *) (udfs->transfer_buffer))[j]; i++; } } up(&udfs->sem); return i; } #define FIRMWARE_MAX_LEN 0x2000 static int firmwareUpload(struct usbduxfastsub_s *usbduxfastsub, const u8 *firmwareBinary, int sizeFirmware) { int ret; uint8_t *fwBuf; if (!firmwareBinary) return 0; if (sizeFirmware > FIRMWARE_MAX_LEN) { dev_err(&usbduxfastsub->interface->dev, "comedi_: usbduxfast firmware binary it too large for FX2.\n"); return -ENOMEM; } /* we generate a local buffer for the firmware */ fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL); if (!fwBuf) { dev_err(&usbduxfastsub->interface->dev, 
"comedi_: mem alloc for firmware failed\n"); return -ENOMEM; } ret = usbduxfastsub_stop(usbduxfastsub); if (ret < 0) { dev_err(&usbduxfastsub->interface->dev, "comedi_: can not stop firmware\n"); kfree(fwBuf); return ret; } ret = usbduxfastsub_upload(usbduxfastsub, fwBuf, 0, sizeFirmware); if (ret < 0) { dev_err(&usbduxfastsub->interface->dev, "comedi_: firmware upload failed\n"); kfree(fwBuf); return ret; } ret = usbduxfastsub_start(usbduxfastsub); if (ret < 0) { dev_err(&usbduxfastsub->interface->dev, "comedi_: can not start firmware\n"); kfree(fwBuf); return ret; } kfree(fwBuf); return 0; } static void tidy_up(struct usbduxfastsub_s *udfs) { #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast: tiding up\n"); #endif if (!udfs) return; /* shows the usb subsystem that the driver is down */ if (udfs->interface) usb_set_intfdata(udfs->interface, NULL); udfs->probed = 0; if (udfs->urbIn) { /* waits until a running transfer is over */ usb_kill_urb(udfs->urbIn); kfree(udfs->transfer_buffer); udfs->transfer_buffer = NULL; usb_free_urb(udfs->urbIn); udfs->urbIn = NULL; } kfree(udfs->insnBuffer); udfs->insnBuffer = NULL; kfree(udfs->dux_commands); udfs->dux_commands = NULL; udfs->ai_cmd_running = 0; } static int usbduxfast_attach_common(struct comedi_device *dev, struct usbduxfastsub_s *udfs) { int ret; struct comedi_subdevice *s; down(&udfs->sem); /* pointer back to the corresponding comedi device */ udfs->comedidev = dev; ret = comedi_alloc_subdevices(dev, 1); if (ret) { up(&udfs->sem); return ret; } /* private structure is also simply the usb-structure */ dev->private = udfs; /* the first subdevice is the A/D converter */ s = &dev->subdevices[SUBDEV_AD]; /* * the URBs get the comedi subdevice which is responsible for reading * this is the subdevice which reads data */ dev->read_subdev = s; /* the subdevice receives as private structure the usb-structure */ s->private = NULL; /* analog input */ s->type = COMEDI_SUBD_AI; /* readable and ref is to ground */ 
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; /* 16 channels */ s->n_chan = 16; /* length of the channellist */ s->len_chanlist = 16; /* callback functions */ s->insn_read = usbduxfast_ai_insn_read; s->do_cmdtest = usbduxfast_ai_cmdtest; s->do_cmd = usbduxfast_ai_cmd; s->cancel = usbduxfast_ai_cancel; /* max value from the A/D converter (12bit+1 bit for overflow) */ s->maxdata = 0x1000; /* range table to convert to physical units */ s->range_table = &range_usbduxfast_ai_range; /* finally decide that it's attached */ udfs->attached = 1; up(&udfs->sem); dev_info(dev->class_dev, "successfully attached to usbduxfast.\n"); return 0; } static int usbduxfast_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct usb_interface *uinterf = comedi_to_usb_interface(dev); int ret; struct usbduxfastsub_s *udfs; dev->private = NULL; down(&start_stop_sem); udfs = usb_get_intfdata(uinterf); if (!udfs || !udfs->probed) { dev_err(dev->class_dev, "usbduxfast: error: auto_attach failed, not connected\n"); ret = -ENODEV; } else if (udfs->attached) { dev_err(dev->class_dev, "usbduxfast: error: auto_attach failed, already attached\n"); ret = -ENODEV; } else ret = usbduxfast_attach_common(dev, udfs); up(&start_stop_sem); return ret; } static void usbduxfast_detach(struct comedi_device *dev) { struct usbduxfastsub_s *usb = dev->private; if (usb) { down(&usb->sem); down(&start_stop_sem); dev->private = NULL; usb->attached = 0; usb->comedidev = NULL; up(&start_stop_sem); up(&usb->sem); } } static struct comedi_driver usbduxfast_driver = { .driver_name = "usbduxfast", .module = THIS_MODULE, .auto_attach = usbduxfast_auto_attach, .detach = usbduxfast_detach, }; static void usbduxfast_firmware_request_complete_handler(const struct firmware *fw, void *context) { struct usbduxfastsub_s *usbduxfastsub_tmp = context; struct usb_interface *uinterf = usbduxfastsub_tmp->interface; int ret; if (fw == NULL) return; /* * we need to upload the firmware here because fw 
will be * freed once we've left this function */ ret = firmwareUpload(usbduxfastsub_tmp, fw->data, fw->size); if (ret) { dev_err(&uinterf->dev, "Could not upload firmware (err=%d)\n", ret); goto out; } comedi_usb_auto_config(uinterf, &usbduxfast_driver, 0); out: release_firmware(fw); } static int usbduxfast_usb_probe(struct usb_interface *uinterf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(uinterf); int i; int index; int ret; if (udev->speed != USB_SPEED_HIGH) { dev_err(&uinterf->dev, "This driver needs USB 2.0 to operate. Aborting...\n"); return -ENODEV; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast_: finding a free structure for " "the usb-device\n"); #endif down(&start_stop_sem); /* look for a free place in the usbduxfast array */ index = -1; for (i = 0; i < NUMUSBDUXFAST; i++) { if (!usbduxfastsub[i].probed) { index = i; break; } } /* no more space */ if (index == -1) { dev_err(&uinterf->dev, "Too many usbduxfast-devices connected.\n"); up(&start_stop_sem); return -EMFILE; } #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast: usbduxfastsub[%d] is ready to " "connect to comedi.\n", index); #endif sema_init(&(usbduxfastsub[index].sem), 1); /* save a pointer to the usb device */ usbduxfastsub[index].usbdev = udev; /* save the interface itself */ usbduxfastsub[index].interface = uinterf; /* get the interface number from the interface */ usbduxfastsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber; /* * hand the private data over to the usb subsystem * will be needed for disconnect */ usb_set_intfdata(uinterf, &(usbduxfastsub[index])); #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast: ifnum=%d\n", usbduxfastsub[index].ifnum); #endif /* create space for the commands going to the usb device */ usbduxfastsub[index].dux_commands = kmalloc(SIZEOFDUXBUFFER, GFP_KERNEL); if (!usbduxfastsub[index].dux_commands) { tidy_up(&(usbduxfastsub[index])); up(&start_stop_sem); return 
-ENOMEM; } /* create space of the instruction buffer */ usbduxfastsub[index].insnBuffer = kmalloc(SIZEINSNBUF, GFP_KERNEL); if (!usbduxfastsub[index].insnBuffer) { tidy_up(&(usbduxfastsub[index])); up(&start_stop_sem); return -ENOMEM; } /* setting to alternate setting 1: enabling bulk ep */ i = usb_set_interface(usbduxfastsub[index].usbdev, usbduxfastsub[index].ifnum, 1); if (i < 0) { dev_err(&uinterf->dev, "usbduxfast%d: could not switch to alternate setting 1.\n", index); tidy_up(&(usbduxfastsub[index])); up(&start_stop_sem); return -ENODEV; } usbduxfastsub[index].urbIn = usb_alloc_urb(0, GFP_KERNEL); if (!usbduxfastsub[index].urbIn) { dev_err(&uinterf->dev, "usbduxfast%d: Could not alloc. urb\n", index); tidy_up(&(usbduxfastsub[index])); up(&start_stop_sem); return -ENOMEM; } usbduxfastsub[index].transfer_buffer = kmalloc(SIZEINBUF, GFP_KERNEL); if (!usbduxfastsub[index].transfer_buffer) { tidy_up(&(usbduxfastsub[index])); up(&start_stop_sem); return -ENOMEM; } /* we've reached the bottom of the function */ usbduxfastsub[index].probed = 1; up(&start_stop_sem); ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, FIRMWARE, &udev->dev, GFP_KERNEL, usbduxfastsub + index, usbduxfast_firmware_request_complete_handler); if (ret) { dev_err(&uinterf->dev, "could not load firmware (err=%d)\n", ret); return ret; } dev_info(&uinterf->dev, "usbduxfast%d has been successfully initialized.\n", index); /* success */ return 0; } static void usbduxfast_usb_disconnect(struct usb_interface *intf) { struct usbduxfastsub_s *udfs = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); if (!udfs) { dev_err(&intf->dev, "disconnect called with null pointer.\n"); return; } if (udfs->usbdev != udev) { dev_err(&intf->dev, "BUG! 
called with wrong ptr!!!\n"); return; } comedi_usb_auto_unconfig(intf); down(&start_stop_sem); down(&udfs->sem); tidy_up(udfs); up(&udfs->sem); up(&start_stop_sem); #ifdef CONFIG_COMEDI_DEBUG printk(KERN_DEBUG "comedi_: usbduxfast: disconnected from the usb\n"); #endif } static const struct usb_device_id usbduxfast_usb_table[] = { /* { USB_DEVICE(0x4b4, 0x8613) }, testing */ { USB_DEVICE(0x13d8, 0x0010) }, /* real ID */ { USB_DEVICE(0x13d8, 0x0011) }, /* real ID */ { } }; MODULE_DEVICE_TABLE(usb, usbduxfast_usb_table); static struct usb_driver usbduxfast_usb_driver = { #ifdef COMEDI_HAVE_USB_DRIVER_OWNER .owner = THIS_MODULE, #endif .name = "usbduxfast", .probe = usbduxfast_usb_probe, .disconnect = usbduxfast_usb_disconnect, .id_table = usbduxfast_usb_table, }; module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE);
gpl-2.0
tyler6389/android_kernel_samsung_baffinve
drivers/misc/atmel_pwm.c
3622
9383
#include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/atmel_pwm.h> /* * This is a simple driver for the PWM controller found in various newer * Atmel SOCs, including the AVR32 series and the AT91sam9263. * * Chips with current Linux ports have only 4 PWM channels, out of max 32. * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux). * Docs are inconsistent about the width of the channel counter registers; * it's at least 16 bits, but several places say 20 bits. */ #define PWM_NCHAN 4 /* max 32 */ struct pwm { spinlock_t lock; struct platform_device *pdev; u32 mask; int irq; void __iomem *base; struct clk *clk; struct pwm_channel *channel[PWM_NCHAN]; void (*handler[PWM_NCHAN])(struct pwm_channel *); }; /* global PWM controller registers */ #define PWM_MR 0x00 #define PWM_ENA 0x04 #define PWM_DIS 0x08 #define PWM_SR 0x0c #define PWM_IER 0x10 #define PWM_IDR 0x14 #define PWM_IMR 0x18 #define PWM_ISR 0x1c static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val) { __raw_writel(val, p->base + offset); } static inline u32 pwm_readl(const struct pwm *p, unsigned offset) { return __raw_readl(p->base + offset); } static inline void __iomem *pwmc_regs(const struct pwm *p, int index) { return p->base + 0x200 + index * 0x20; } static struct pwm *pwm; static void pwm_dumpregs(struct pwm_channel *ch, char *tag) { struct device *dev = &pwm->pdev->dev; dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n", tag, pwm_readl(pwm, PWM_MR), pwm_readl(pwm, PWM_SR), pwm_readl(pwm, PWM_IMR)); dev_dbg(dev, "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n", ch->index, pwm_channel_readl(ch, PWM_CMR), pwm_channel_readl(ch, PWM_CDTY), pwm_channel_readl(ch, PWM_CPRD), pwm_channel_readl(ch, PWM_CCNT)); } /** * pwm_channel_alloc - allocate an unused PWM channel * @index: identifies the channel * @ch: structure to be initialized * 
* Drivers allocate PWM channels according to the board's wiring, and * matching board-specific setup code. Returns zero or negative errno. */ int pwm_channel_alloc(int index, struct pwm_channel *ch) { unsigned long flags; int status = 0; /* insist on PWM init, with this signal pinned out */ if (!pwm || !(pwm->mask & 1 << index)) return -ENODEV; if (index < 0 || index >= PWM_NCHAN || !ch) return -EINVAL; memset(ch, 0, sizeof *ch); spin_lock_irqsave(&pwm->lock, flags); if (pwm->channel[index]) status = -EBUSY; else { clk_enable(pwm->clk); ch->regs = pwmc_regs(pwm, index); ch->index = index; /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */ ch->mck = clk_get_rate(pwm->clk); pwm->channel[index] = ch; pwm->handler[index] = NULL; /* channel and irq are always disabled when we return */ pwm_writel(pwm, PWM_DIS, 1 << index); pwm_writel(pwm, PWM_IDR, 1 << index); } spin_unlock_irqrestore(&pwm->lock, flags); return status; } EXPORT_SYMBOL(pwm_channel_alloc); static int pwmcheck(struct pwm_channel *ch) { int index; if (!pwm) return -ENODEV; if (!ch) return -EINVAL; index = ch->index; if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch) return -EINVAL; return index; } /** * pwm_channel_free - release a previously allocated channel * @ch: the channel being released * * The channel is completely shut down (counter and IRQ disabled), * and made available for re-use. Returns zero, or negative errno. 
*/ int pwm_channel_free(struct pwm_channel *ch) { unsigned long flags; int t; spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm->channel[t] = NULL; pwm->handler[t] = NULL; /* channel and irq are always disabled when we return */ pwm_writel(pwm, PWM_DIS, 1 << t); pwm_writel(pwm, PWM_IDR, 1 << t); clk_disable(pwm->clk); t = 0; } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(pwm_channel_free); int __pwm_channel_onoff(struct pwm_channel *ch, int enabled) { unsigned long flags; int t; /* OMITTED FUNCTIONALITY: starting several channels in synch */ spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t); t = 0; pwm_dumpregs(ch, enabled ? "enable" : "disable"); } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(__pwm_channel_onoff); /** * pwm_clk_alloc - allocate and configure CLKA or CLKB * @prescale: from 0..10, the power of two used to divide MCK * @div: from 1..255, the linear divisor to use * * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated * clock will run with a period of (2^prescale * div) / MCK, or twice as * long if center aligned PWM output is used. The clock must later be * deconfigured using pwm_clk_free(). */ int pwm_clk_alloc(unsigned prescale, unsigned div) { unsigned long flags; u32 mr; u32 val = (prescale << 8) | div; int ret = -EBUSY; if (prescale >= 10 || div == 0 || div > 255) return -EINVAL; spin_lock_irqsave(&pwm->lock, flags); mr = pwm_readl(pwm, PWM_MR); if ((mr & 0xffff) == 0) { mr |= val; ret = PWM_CPR_CLKA; } else if ((mr & (0xffff << 16)) == 0) { mr |= val << 16; ret = PWM_CPR_CLKB; } if (ret > 0) pwm_writel(pwm, PWM_MR, mr); spin_unlock_irqrestore(&pwm->lock, flags); return ret; } EXPORT_SYMBOL(pwm_clk_alloc); /** * pwm_clk_free - deconfigure and release CLKA or CLKB * * Reverses the effect of pwm_clk_alloc(). 
*/ void pwm_clk_free(unsigned clk) { unsigned long flags; u32 mr; spin_lock_irqsave(&pwm->lock, flags); mr = pwm_readl(pwm, PWM_MR); if (clk == PWM_CPR_CLKA) pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0)); if (clk == PWM_CPR_CLKB) pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16)); spin_unlock_irqrestore(&pwm->lock, flags); } EXPORT_SYMBOL(pwm_clk_free); /** * pwm_channel_handler - manage channel's IRQ handler * @ch: the channel * @handler: the handler to use, possibly NULL * * If the handler is non-null, the handler will be called after every * period of this PWM channel. If the handler is null, this channel * won't generate an IRQ. */ int pwm_channel_handler(struct pwm_channel *ch, void (*handler)(struct pwm_channel *ch)) { unsigned long flags; int t; spin_lock_irqsave(&pwm->lock, flags); t = pwmcheck(ch); if (t >= 0) { pwm->handler[t] = handler; pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t); t = 0; } spin_unlock_irqrestore(&pwm->lock, flags); return t; } EXPORT_SYMBOL(pwm_channel_handler); static irqreturn_t pwm_irq(int id, void *_pwm) { struct pwm *p = _pwm; irqreturn_t handled = IRQ_NONE; u32 irqstat; int index; spin_lock(&p->lock); /* ack irqs, then handle them */ irqstat = pwm_readl(pwm, PWM_ISR); while (irqstat) { struct pwm_channel *ch; void (*handler)(struct pwm_channel *ch); index = ffs(irqstat) - 1; irqstat &= ~(1 << index); ch = pwm->channel[index]; handler = pwm->handler[index]; if (handler && ch) { spin_unlock(&p->lock); handler(ch); spin_lock(&p->lock); handled = IRQ_HANDLED; } } spin_unlock(&p->lock); return handled; } static int __init pwm_probe(struct platform_device *pdev) { struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); u32 *mp = pdev->dev.platform_data; struct pwm *p; int status = -EIO; if (pwm) return -EBUSY; if (!r || irq < 0 || !mp || !*mp) return -ENODEV; if (*mp & ~((1<<PWM_NCHAN)-1)) { dev_warn(&pdev->dev, "mask 0x%x ... 
more than %d channels\n", *mp, PWM_NCHAN); return -EINVAL; } p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; spin_lock_init(&p->lock); p->pdev = pdev; p->mask = *mp; p->irq = irq; p->base = ioremap(r->start, r->end - r->start + 1); if (!p->base) goto fail; p->clk = clk_get(&pdev->dev, "pwm_clk"); if (IS_ERR(p->clk)) { status = PTR_ERR(p->clk); p->clk = NULL; goto fail; } status = request_irq(irq, pwm_irq, 0, pdev->name, p); if (status < 0) goto fail; pwm = p; platform_set_drvdata(pdev, p); return 0; fail: if (p->clk) clk_put(p->clk); if (p->base) iounmap(p->base); kfree(p); return status; } static int __exit pwm_remove(struct platform_device *pdev) { struct pwm *p = platform_get_drvdata(pdev); if (p != pwm) return -EINVAL; clk_enable(pwm->clk); pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1); pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1); clk_disable(pwm->clk); pwm = NULL; free_irq(p->irq, p); clk_put(p->clk); iounmap(p->base); kfree(p); return 0; } static struct platform_driver atmel_pwm_driver = { .driver = { .name = "atmel_pwm", .owner = THIS_MODULE, }, .remove = __exit_p(pwm_remove), /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states; * and all AT91sam9263 states, albeit at reduced clock rate if * MCK becomes the slow clock (i.e. what Linux labels STR). */ }; static int __init pwm_init(void) { return platform_driver_probe(&atmel_pwm_driver, pwm_probe); } module_init(pwm_init); static void __exit pwm_exit(void) { platform_driver_unregister(&atmel_pwm_driver); } module_exit(pwm_exit); MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:atmel_pwm");
gpl-2.0
MWisBest/kernel_samsung_tuna
arch/x86/pci/bus_numa.c
4134
1985
#include <linux/init.h> #include <linux/pci.h> #include <linux/range.h> #include "bus_numa.h" int pci_root_num; struct pci_root_info pci_root_info[PCI_ROOT_NR]; void x86_pci_root_bus_res_quirks(struct pci_bus *b) { int i; int j; struct pci_root_info *info; /* don't go for it if _CRS is used already */ if (b->resource[0] != &ioport_resource || b->resource[1] != &iomem_resource) return; if (!pci_root_num) return; for (i = 0; i < pci_root_num; i++) { if (pci_root_info[i].bus_min == b->number) break; } if (i == pci_root_num) return; printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n", b->number); pci_bus_remove_resources(b); info = &pci_root_info[i]; for (j = 0; j < info->res_num; j++) { struct resource *res; struct resource *root; res = &info->res[j]; pci_bus_add_resource(b, res, 0); if (res->flags & IORESOURCE_IO) root = &ioport_resource; else root = &iomem_resource; insert_resource(root, res); } } void __devinit update_res(struct pci_root_info *info, resource_size_t start, resource_size_t end, unsigned long flags, int merge) { int i; struct resource *res; if (start > end) return; if (start == MAX_RESOURCE) return; if (!merge) goto addit; /* try to merge it with old one */ for (i = 0; i < info->res_num; i++) { resource_size_t final_start, final_end; resource_size_t common_start, common_end; res = &info->res[i]; if (res->flags != flags) continue; common_start = max(res->start, start); common_end = min(res->end, end); if (common_start > common_end + 1) continue; final_start = min(res->start, start); final_end = max(res->end, end); res->start = final_start; res->end = final_end; return; } addit: /* need to add that */ if (info->res_num >= RES_NUM) return; res = &info->res[info->res_num]; res->name = info->name; res->flags = flags; res->start = start; res->end = end; res->child = NULL; info->res_num++; }
gpl-2.0
yoctobsp/linux-yocto-3.14
arch/arm/mach-ep93xx/gesbc9312.c
4646
1218
/* * arch/arm/mach-ep93xx/gesbc9312.c * Glomation GESBC-9312-sx support. * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "soc.h" static struct ep93xx_eth_data __initdata gesbc9312_eth_data = { .phy_id = 1, }; static void __init gesbc9312_init_machine(void) { ep93xx_init_devices(); ep93xx_register_flash(4, EP93XX_CS6_PHYS_BASE, SZ_8M); ep93xx_register_eth(&gesbc9312_eth_data, 0); } MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx") /* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */ .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, .init_time = ep93xx_timer_init, .init_machine = gesbc9312_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, MACHINE_END
gpl-2.0
bangprovn/android_stock
drivers/media/video/cx25821/cx25821-alsa.c
4902
19787
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com> * Based on SAA713x ALSA driver and CX88 driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include <sound/tlv.h> #include "cx25821.h" #include "cx25821-reg.h" #define AUDIO_SRAM_CHANNEL SRAM_CH08 #define dprintk(level, fmt, arg...) \ do { \ if (debug >= level) \ pr_info("%s/1: " fmt, chip->dev->name, ##arg); \ } while (0) #define dprintk_core(level, fmt, arg...) 
\ do { \ if (debug >= level) \ printk(KERN_DEBUG "%s/1: " fmt, chip->dev->name, ##arg); \ } while (0) /**************************************************************************** Data type declarations - Can be moded to a header file later ****************************************************************************/ static struct snd_card *snd_cx25821_cards[SNDRV_CARDS]; static int devno; struct cx25821_audio_buffer { unsigned int bpl; struct btcx_riscmem risc; struct videobuf_dmabuf dma; }; struct cx25821_audio_dev { struct cx25821_dev *dev; struct cx25821_dmaqueue q; /* pci i/o */ struct pci_dev *pci; /* audio controls */ int irq; struct snd_card *card; unsigned long iobase; spinlock_t reg_lock; atomic_t count; unsigned int dma_size; unsigned int period_size; unsigned int num_periods; struct videobuf_dmabuf *dma_risc; struct cx25821_audio_buffer *buf; struct snd_pcm_substream *substream; }; /**************************************************************************** Module global static vars ****************************************************************************/ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 }; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. 
default enabled."); module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s)."); /**************************************************************************** Module macros ****************************************************************************/ MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards"); MODULE_AUTHOR("Hiep Huynh"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Conexant,25821}"); /* "{{Conexant,23881}," */ static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); /**************************************************************************** Module specific funtions ****************************************************************************/ /* Constants taken from cx88-reg.h */ #define AUD_INT_DN_RISCI1 (1 << 0) #define AUD_INT_UP_RISCI1 (1 << 1) #define AUD_INT_RDS_DN_RISCI1 (1 << 2) #define AUD_INT_DN_RISCI2 (1 << 4) /* yes, 3 is skipped */ #define AUD_INT_UP_RISCI2 (1 << 5) #define AUD_INT_RDS_DN_RISCI2 (1 << 6) #define AUD_INT_DN_SYNC (1 << 12) #define AUD_INT_UP_SYNC (1 << 13) #define AUD_INT_RDS_DN_SYNC (1 << 14) #define AUD_INT_OPC_ERR (1 << 16) #define AUD_INT_BER_IRQ (1 << 20) #define AUD_INT_MCHG_IRQ (1 << 21) #define GP_COUNT_CONTROL_RESET 0x3 #define PCI_MSK_AUD_EXT (1 << 4) #define PCI_MSK_AUD_INT (1 << 3) /* * BOARD Specific: Sets audio DMA */ static int _cx25821_start_audio_dma(struct cx25821_audio_dev *chip) { struct cx25821_audio_buffer *buf = chip->buf; struct cx25821_dev *dev = chip->dev; struct sram_channel *audio_ch = &cx25821_sram_channels[AUDIO_SRAM_CHANNEL]; u32 tmp = 0; /* enable output on the GPIO 0 for the MCLK ADC (Audio) */ cx25821_set_gpiopin_direction(chip->dev, 0, 0); /* Make sure RISC/FIFO are off before changing FIFO/RISC settings */ cx_clear(AUD_INT_DMA_CTL, FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN); /* setup fifo + format - out channel */ 
cx25821_sram_channel_setup_audio(chip->dev, audio_ch, buf->bpl, buf->risc.dma); /* sets bpl size */ cx_write(AUD_A_LNGTH, buf->bpl); /* reset counter */ /* GP_COUNT_CONTROL_RESET = 0x3 */ cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET); atomic_set(&chip->count, 0); /* Set the input mode to 16-bit */ tmp = cx_read(AUD_A_CFG); cx_write(AUD_A_CFG, tmp | FLD_AUD_DST_PK_MODE | FLD_AUD_DST_ENABLE | FLD_AUD_CLK_ENABLE); /* pr_info("DEBUG: Start audio DMA, %d B/line, cmds_start(0x%x)= %d lines/FIFO, %d periods, %d byte buffer\n", buf->bpl, audio_ch->cmds_start, cx_read(audio_ch->cmds_start + 12)>>1, chip->num_periods, buf->bpl * chip->num_periods); */ /* Enables corresponding bits at AUD_INT_STAT */ cx_write(AUD_A_INT_MSK, FLD_AUD_DST_RISCI1 | FLD_AUD_DST_OF | FLD_AUD_DST_SYNC | FLD_AUD_DST_OPC_ERR); /* Clean any pending interrupt bits already set */ cx_write(AUD_A_INT_STAT, ~0); /* enable audio irqs */ cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT); /* Turn on audio downstream fifo and risc enable 0x101 */ tmp = cx_read(AUD_INT_DMA_CTL); cx_set(AUD_INT_DMA_CTL, tmp | (FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN)); mdelay(100); return 0; } /* * BOARD Specific: Resets audio DMA */ static int _cx25821_stop_audio_dma(struct cx25821_audio_dev *chip) { struct cx25821_dev *dev = chip->dev; /* stop dma */ cx_clear(AUD_INT_DMA_CTL, FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN); /* disable irqs */ cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT); cx_clear(AUD_A_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1); return 0; } #define MAX_IRQ_LOOP 50 /* * BOARD Specific: IRQ dma bits */ static char *cx25821_aud_irqs[32] = { "dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */ NULL, /* reserved */ "dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */ NULL, /* reserved */ "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */ NULL, /* reserved */ "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */ NULL, /* reserved */ "opc_err", "par_err", "rip_err", /* 16-18 
*/ "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */ }; /* * BOARD Specific: Threats IRQ audio specific calls */ static void cx25821_aud_irq(struct cx25821_audio_dev *chip, u32 status, u32 mask) { struct cx25821_dev *dev = chip->dev; if (0 == (status & mask)) return; cx_write(AUD_A_INT_STAT, status); if (debug > 1 || (status & mask & ~0xff)) cx25821_print_irqbits(dev->name, "irq aud", cx25821_aud_irqs, ARRAY_SIZE(cx25821_aud_irqs), status, mask); /* risc op code error */ if (status & AUD_INT_OPC_ERR) { pr_warn("WARNING %s/1: Audio risc op code error\n", dev->name); cx_clear(AUD_INT_DMA_CTL, FLD_AUD_DST_A_RISC_EN | FLD_AUD_DST_A_FIFO_EN); cx25821_sram_channel_dump_audio(dev, &cx25821_sram_channels[AUDIO_SRAM_CHANNEL]); } if (status & AUD_INT_DN_SYNC) { pr_warn("WARNING %s: Downstream sync error!\n", dev->name); cx_write(AUD_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET); return; } /* risc1 downstream */ if (status & AUD_INT_DN_RISCI1) { atomic_set(&chip->count, cx_read(AUD_A_GPCNT)); snd_pcm_period_elapsed(chip->substream); } } /* * BOARD Specific: Handles IRQ calls */ static irqreturn_t cx25821_irq(int irq, void *dev_id) { struct cx25821_audio_dev *chip = dev_id; struct cx25821_dev *dev = chip->dev; u32 status, pci_status; u32 audint_status, audint_mask; int loop, handled = 0; int audint_count = 0; audint_status = cx_read(AUD_A_INT_STAT); audint_mask = cx_read(AUD_A_INT_MSK); audint_count = cx_read(AUD_A_GPCNT); status = cx_read(PCI_INT_STAT); for (loop = 0; loop < 1; loop++) { status = cx_read(PCI_INT_STAT); if (0 == status) { status = cx_read(PCI_INT_STAT); audint_status = cx_read(AUD_A_INT_STAT); audint_mask = cx_read(AUD_A_INT_MSK); if (status) { handled = 1; cx_write(PCI_INT_STAT, status); cx25821_aud_irq(chip, audint_status, audint_mask); break; } else { goto out; } } handled = 1; cx_write(PCI_INT_STAT, status); cx25821_aud_irq(chip, audint_status, audint_mask); } pci_status = cx_read(PCI_INT_STAT); if (handled) cx_write(PCI_INT_STAT, pci_status); out: return 
IRQ_RETVAL(handled); } static int dsp_buffer_free(struct cx25821_audio_dev *chip) { BUG_ON(!chip->dma_size); dprintk(2, "Freeing buffer\n"); videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci, &chip->buf->risc); kfree(chip->buf); chip->dma_risc = NULL; chip->dma_size = 0; return 0; } /**************************************************************************** ALSA PCM Interface ****************************************************************************/ /* * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 384 static struct snd_pcm_hardware snd_cx25821_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, /* Analog audio output will be full of clicks and pops if there are not exactly four lines in the SRAM FIFO buffer. */ .period_bytes_min = DEFAULT_FIFO_SIZE / 3, .period_bytes_max = DEFAULT_FIFO_SIZE / 3, .periods_min = 1, .periods_max = AUDIO_LINE_SIZE, /* 128 * 128 = 16384 = 1024 * 16 */ .buffer_bytes_max = (AUDIO_LINE_SIZE * AUDIO_LINE_SIZE), }; /* * audio pcm capture open callback */ static int snd_cx25821_pcm_open(struct snd_pcm_substream *substream) { struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; unsigned int bpl = 0; if (!chip) { pr_err("DEBUG: cx25821 can't find device struct. 
Can't proceed with open\n"); return -ENODEV; } err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; runtime->hw = snd_cx25821_digital_hw; if (cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size != DEFAULT_FIFO_SIZE) { /* since there are 3 audio Clusters */ bpl = cx25821_sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 3; bpl &= ~7; /* must be multiple of 8 */ if (bpl > AUDIO_LINE_SIZE) bpl = AUDIO_LINE_SIZE; runtime->hw.period_bytes_min = bpl; runtime->hw.period_bytes_max = bpl; } return 0; _error: dprintk(1, "Error opening PCM!\n"); return err; } /* * audio close callback */ static int snd_cx25821_close(struct snd_pcm_substream *substream) { return 0; } /* * hw_params callback */ static int snd_cx25821_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream); struct videobuf_dmabuf *dma; struct cx25821_audio_buffer *buf; int ret; if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } chip->period_size = params_period_bytes(hw_params); chip->num_periods = params_periods(hw_params); chip->dma_size = chip->period_size * params_periods(hw_params); BUG_ON(!chip->dma_size); BUG_ON(chip->num_periods & (chip->num_periods - 1)); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (NULL == buf) return -ENOMEM; if (chip->period_size > AUDIO_LINE_SIZE) chip->period_size = AUDIO_LINE_SIZE; buf->bpl = chip->period_size; dma = &buf->dma; videobuf_dma_init(dma); ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE, (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT)); if (ret < 0) goto error; ret = videobuf_dma_map(&chip->pci->dev, dma); if (ret < 0) goto error; ret = cx25821_risc_databuffer_audio(chip->pci, &buf->risc, dma->sglist, chip->period_size, chip->num_periods, 1); if (ret < 0) { pr_info("DEBUG: ERROR after cx25821_risc_databuffer_audio()\n"); goto error; } /* Loop back to 
start of program */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma); buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ chip->buf = buf; chip->dma_risc = dma; substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; return 0; error: kfree(buf); return ret; } /* * hw free callback */ static int snd_cx25821_hw_free(struct snd_pcm_substream *substream) { struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream); if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } return 0; } /* * prepare callback */ static int snd_cx25821_prepare(struct snd_pcm_substream *substream) { return 0; } /* * trigger callback */ static int snd_cx25821_card_trigger(struct snd_pcm_substream *substream, int cmd) { struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream); int err = 0; /* Local interrupts are already disabled by ALSA */ spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: err = _cx25821_start_audio_dma(chip); break; case SNDRV_PCM_TRIGGER_STOP: err = _cx25821_stop_audio_dma(chip); break; default: err = -EINVAL; break; } spin_unlock(&chip->reg_lock); return err; } /* * pointer callback */ static snd_pcm_uframes_t snd_cx25821_pointer(struct snd_pcm_substream *substream) { struct cx25821_audio_dev *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; u16 count; count = atomic_read(&chip->count); return runtime->period_size * (count & (runtime->periods - 1)); } /* * page callback (needed for mmap) */ static struct page *snd_cx25821_page(struct snd_pcm_substream *substream, unsigned long offset) { void *pageptr = substream->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * operators */ static struct snd_pcm_ops snd_cx25821_pcm_ops = { .open = snd_cx25821_pcm_open, .close = snd_cx25821_close, 
.ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cx25821_hw_params, .hw_free = snd_cx25821_hw_free, .prepare = snd_cx25821_prepare, .trigger = snd_cx25821_card_trigger, .pointer = snd_cx25821_pointer, .page = snd_cx25821_page, }; /* * ALSA create a PCM device: Called when initializing the board. * Sets up the name and hooks up the callbacks */ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device, char *name) { struct snd_pcm *pcm; int err; err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm); if (err < 0) { pr_info("ERROR: FAILED snd_pcm_new() in %s\n", __func__); return err; } pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx25821_pcm_ops); return 0; } /**************************************************************************** Basic Flow for Sound Devices ****************************************************************************/ /* * PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio * Only boards with eeprom and byte 1 at eeprom=1 have it */ static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = { {0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, cx25821_audio_pci_tbl); /* * Not used in the function snd_cx25821_dev_free so removing * from the file. 
*/ /* static int snd_cx25821_free(struct cx25821_audio_dev *chip) { if (chip->irq >= 0) free_irq(chip->irq, chip); cx25821_dev_unregister(chip->dev); pci_disable_device(chip->pci); return 0; } */ /* * Component Destructor */ static void snd_cx25821_dev_free(struct snd_card *card) { struct cx25821_audio_dev *chip = card->private_data; /* snd_cx25821_free(chip); */ snd_card_free(chip->card); } /* * Alsa Constructor - Component probe */ static int cx25821_audio_initdev(struct cx25821_dev *dev) { struct snd_card *card; struct cx25821_audio_dev *chip; int err; if (devno >= SNDRV_CARDS) { pr_info("DEBUG ERROR: devno >= SNDRV_CARDS %s\n", __func__); return -ENODEV; } if (!enable[devno]) { ++devno; pr_info("DEBUG ERROR: !enable[devno] %s\n", __func__); return -ENOENT; } err = snd_card_create(index[devno], id[devno], THIS_MODULE, sizeof(struct cx25821_audio_dev), &card); if (err < 0) { pr_info("DEBUG ERROR: cannot create snd_card_new in %s\n", __func__); return err; } strcpy(card->driver, "cx25821"); /* Card "creation" */ card->private_free = snd_cx25821_dev_free; chip = card->private_data; spin_lock_init(&chip->reg_lock); chip->dev = dev; chip->card = card; chip->pci = dev->pci; chip->iobase = pci_resource_start(dev->pci, 0); chip->irq = dev->pci->irq; err = request_irq(dev->pci->irq, cx25821_irq, IRQF_SHARED, chip->dev->name, chip); if (err < 0) { pr_err("ERROR %s: can't get IRQ %d for ALSA\n", chip->dev->name, dev->pci->irq); goto error; } err = snd_cx25821_pcm(chip, 0, "cx25821 Digital"); if (err < 0) { pr_info("DEBUG ERROR: cannot create snd_cx25821_pcm %s\n", __func__); goto error; } snd_card_set_dev(card, &chip->pci->dev); strcpy(card->shortname, "cx25821"); sprintf(card->longname, "%s at 0x%lx irq %d", chip->dev->name, chip->iobase, chip->irq); strcpy(card->mixername, "CX25821"); pr_info("%s/%i: ALSA support for cx25821 boards\n", card->driver, devno); err = snd_card_register(card); if (err < 0) { pr_info("DEBUG ERROR: cannot register sound card %s\n", __func__); 
goto error; } snd_cx25821_cards[devno] = card; devno++; return 0; error: snd_card_free(card); return err; } /**************************************************************************** LINUX MODULE INIT ****************************************************************************/ static void cx25821_audio_fini(void) { snd_card_free(snd_cx25821_cards[0]); } /* * Module initializer * * Loops through present saa7134 cards, and assigns an ALSA device * to each one * */ static int cx25821_alsa_init(void) { struct cx25821_dev *dev = NULL; struct list_head *list; mutex_lock(&cx25821_devlist_mutex); list_for_each(list, &cx25821_devlist) { dev = list_entry(list, struct cx25821_dev, devlist); cx25821_audio_initdev(dev); } mutex_unlock(&cx25821_devlist_mutex); if (dev == NULL) pr_info("ERROR ALSA: no cx25821 cards found\n"); return 0; } late_initcall(cx25821_alsa_init); module_exit(cx25821_audio_fini); /* ----------------------------------------------------------- */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
SimpleAOSP-Kernel/kernel_mako
drivers/media/video/cx25821/cx25821-audio-upstream.c
4902
20308
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <hiep.huynh@conexant.com>, <shu.lin@conexant.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "cx25821-video.h" #include "cx25821-audio-upstream.h" #include <linux/fs.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/uaccess.h> MODULE_DESCRIPTION("v4l2 driver module for cx25821 based TV cards"); MODULE_AUTHOR("Hiep Huynh <hiep.huynh@conexant.com>"); MODULE_LICENSE("GPL"); static int _intr_msk = FLD_AUD_SRC_RISCI1 | FLD_AUD_SRC_OF | FLD_AUD_SRC_SYNC | FLD_AUD_SRC_OPC_ERR; int cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev, struct sram_channel *ch, unsigned int bpl, u32 risc) { unsigned int i, lines; u32 cdt; if (ch->cmds_start == 0) { cx_write(ch->ptr1_reg, 0); cx_write(ch->ptr2_reg, 0); cx_write(ch->cnt2_reg, 0); cx_write(ch->cnt1_reg, 0); return 0; } bpl = (bpl + 7) & ~7; /* alignment */ cdt = ch->cdt; lines = ch->fifo_size / bpl; if (lines > 3) lines = 3; BUG_ON(lines < 2); /* write CDT */ for (i = 0; i < lines; i++) { cx_write(cdt + 16 * i, ch->fifo_start + bpl * 
i); cx_write(cdt + 16 * i + 4, 0); cx_write(cdt + 16 * i + 8, 0); cx_write(cdt + 16 * i + 12, 0); } /* write CMDS */ cx_write(ch->cmds_start + 0, risc); cx_write(ch->cmds_start + 4, 0); cx_write(ch->cmds_start + 8, cdt); cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW); cx_write(ch->cmds_start + 16, ch->ctrl_start); /* IQ size */ cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW); for (i = 24; i < 80; i += 4) cx_write(ch->cmds_start + i, 0); /* fill registers */ cx_write(ch->ptr1_reg, ch->fifo_start); cx_write(ch->ptr2_reg, cdt); cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW); cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1); return 0; } static __le32 *cx25821_risc_field_upstream_audio(struct cx25821_dev *dev, __le32 *rp, dma_addr_t databuf_phys_addr, unsigned int bpl, int fifo_enable) { unsigned int line; struct sram_channel *sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; int offset = 0; /* scan lines */ for (line = 0; line < LINES_PER_AUDIO_BUFFER; line++) { *(rp++) = cpu_to_le32(RISC_READ | RISC_SOL | RISC_EOL | bpl); *(rp++) = cpu_to_le32(databuf_phys_addr + offset); *(rp++) = cpu_to_le32(0); /* bits 63-32 */ /* Check if we need to enable the FIFO * after the first 3 lines. 
* For the upstream audio channel, * the risc engine will enable the FIFO */ if (fifo_enable && line == 2) { *(rp++) = RISC_WRITECR; *(rp++) = sram_ch->dma_ctl; *(rp++) = sram_ch->fld_aud_fifo_en; *(rp++) = 0x00000020; } offset += AUDIO_LINE_SIZE; } return rp; } int cx25821_risc_buffer_upstream_audio(struct cx25821_dev *dev, struct pci_dev *pci, unsigned int bpl, unsigned int lines) { __le32 *rp; int fifo_enable = 0; int frame = 0, i = 0; int frame_size = AUDIO_DATA_BUF_SZ; int databuf_offset = 0; int risc_flag = RISC_CNT_INC; dma_addr_t risc_phys_jump_addr; /* Virtual address of Risc buffer program */ rp = dev->_risc_virt_addr; /* sync instruction */ *(rp++) = cpu_to_le32(RISC_RESYNC | AUDIO_SYNC_LINE); for (frame = 0; frame < NUM_AUDIO_FRAMES; frame++) { databuf_offset = frame_size * frame; if (frame == 0) { fifo_enable = 1; risc_flag = RISC_CNT_RESET; } else { fifo_enable = 0; risc_flag = RISC_CNT_INC; } /* Calculate physical jump address */ if ((frame + 1) == NUM_AUDIO_FRAMES) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE; } else { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE * (frame + 1); } rp = cx25821_risc_field_upstream_audio(dev, rp, dev->_audiodata_buf_phys_addr + databuf_offset, bpl, fifo_enable); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) *(rp++) = cpu_to_le32(RISC_NOOP); } /* Loop to (Nth)FrameRISC or to Start of Risc program & * generate IRQ */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | risc_flag); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); /* Recalculate virtual address based on frame index */ rp = dev->_risc_virt_addr + RISC_SYNC_INSTRUCTION_SIZE / 4 + (AUDIO_RISC_DMA_BUF_SIZE * (frame + 1) / 4); } return 0; } void cx25821_free_memory_audio(struct cx25821_dev *dev) { if (dev->_risc_virt_addr) { pci_free_consistent(dev->pci, dev->_audiorisc_size, dev->_risc_virt_addr, dev->_risc_phys_addr); dev->_risc_virt_addr = 
NULL; } if (dev->_audiodata_buf_virt_addr) { pci_free_consistent(dev->pci, dev->_audiodata_buf_size, dev->_audiodata_buf_virt_addr, dev->_audiodata_buf_phys_addr); dev->_audiodata_buf_virt_addr = NULL; } } void cx25821_stop_upstream_audio(struct cx25821_dev *dev) { struct sram_channel *sram_ch = dev->channels[AUDIO_UPSTREAM_SRAM_CHANNEL_B].sram_channels; u32 tmp = 0; if (!dev->_audio_is_running) { printk(KERN_DEBUG pr_fmt("No audio file is currently running so return!\n")); return; } /* Disable RISC interrupts */ cx_write(sram_ch->int_msk, 0); /* Turn OFF risc and fifo enable in AUD_DMA_CNTRL */ tmp = cx_read(sram_ch->dma_ctl); cx_write(sram_ch->dma_ctl, tmp & ~(sram_ch->fld_aud_fifo_en | sram_ch->fld_aud_risc_en)); /* Clear data buffer memory */ if (dev->_audiodata_buf_virt_addr) memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); dev->_audio_is_running = 0; dev->_is_first_audio_frame = 0; dev->_audioframe_count = 0; dev->_audiofile_status = END_OF_FILE; kfree(dev->_irq_audio_queues); dev->_irq_audio_queues = NULL; kfree(dev->_audiofilename); } void cx25821_free_mem_upstream_audio(struct cx25821_dev *dev) { if (dev->_audio_is_running) cx25821_stop_upstream_audio(dev); cx25821_free_memory_audio(dev); } int cx25821_get_audio_data(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int frame_index_temp = dev->_audioframe_index; int i = 0; int line_size = AUDIO_LINE_SIZE; int frame_size = AUDIO_DATA_BUF_SZ; int frame_offset = frame_size * frame_index_temp; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t file_offset = dev->_audioframe_count * frame_size; loff_t pos; mm_segment_t old_fs; if (dev->_audiofile_status == END_OF_FILE) return 0; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if 
(!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (i = 0; i < dev->_audio_lines_count; i++) { pos = file_offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev->_audiodata_buf_virt_addr + frame_offset / 4), mybuf, vfs_read_retval); } file_offset += vfs_read_retval; frame_offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; dev->_audiofile_status = (vfs_read_retval == line_size) ? IN_PROGRESS : END_OF_FILE; set_fs(old_fs); filp_close(myfile, NULL); } return 0; } static void cx25821_audioups_handler(struct work_struct *work) { struct cx25821_dev *dev = container_of(work, struct cx25821_dev, _audio_work_entry); if (!dev) { pr_err("ERROR %s(): since container_of(work_struct) FAILED!\n", __func__); return; } cx25821_get_audio_data(dev, dev->channels[dev->_audio_upstream_channel]. 
sram_channels); } int cx25821_openfile_audio(struct cx25821_dev *dev, struct sram_channel *sram_ch) { struct file *myfile; int i = 0, j = 0; int line_size = AUDIO_LINE_SIZE; ssize_t vfs_read_retval = 0; char mybuf[line_size]; loff_t pos; loff_t offset = (unsigned long)0; mm_segment_t old_fs; myfile = filp_open(dev->_audiofilename, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(myfile)) { const int open_errno = -PTR_ERR(myfile); pr_err("%s(): ERROR opening file(%s) with errno = %d!\n", __func__, dev->_audiofilename, open_errno); return PTR_ERR(myfile); } else { if (!(myfile->f_op)) { pr_err("%s(): File has no file operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } if (!myfile->f_op->read) { pr_err("%s(): File has no READ operations registered!\n", __func__); filp_close(myfile, NULL); return -EIO; } pos = myfile->f_pos; old_fs = get_fs(); set_fs(KERNEL_DS); for (j = 0; j < NUM_AUDIO_FRAMES; j++) { for (i = 0; i < dev->_audio_lines_count; i++) { pos = offset; vfs_read_retval = vfs_read(myfile, mybuf, line_size, &pos); if (vfs_read_retval > 0 && vfs_read_retval == line_size && dev->_audiodata_buf_virt_addr != NULL) { memcpy((void *)(dev-> _audiodata_buf_virt_addr + offset / 4), mybuf, vfs_read_retval); } offset += vfs_read_retval; if (vfs_read_retval < line_size) { pr_info("Done: exit %s() since no more bytes to read from Audio file\n", __func__); break; } } if (i > 0) dev->_audioframe_count++; if (vfs_read_retval < line_size) break; } dev->_audiofile_status = (vfs_read_retval == line_size) ? 
IN_PROGRESS : END_OF_FILE; set_fs(old_fs); myfile->f_pos = 0; filp_close(myfile, NULL); } return 0; } static int cx25821_audio_upstream_buffer_prepare(struct cx25821_dev *dev, struct sram_channel *sram_ch, int bpl) { int ret = 0; dma_addr_t dma_addr; dma_addr_t data_dma_addr; cx25821_free_memory_audio(dev); dev->_risc_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_riscbuf_size, &dma_addr); dev->_risc_virt_start_addr = dev->_risc_virt_addr; dev->_risc_phys_start_addr = dma_addr; dev->_risc_phys_addr = dma_addr; dev->_audiorisc_size = dev->audio_upstream_riscbuf_size; if (!dev->_risc_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for RISC program! Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_risc_virt_addr, 0, dev->_audiorisc_size); /* For Audio Data buffer allocation */ dev->_audiodata_buf_virt_addr = pci_alloc_consistent(dev->pci, dev->audio_upstream_databuf_size, &data_dma_addr); dev->_audiodata_buf_phys_addr = data_dma_addr; dev->_audiodata_buf_size = dev->audio_upstream_databuf_size; if (!dev->_audiodata_buf_virt_addr) { printk(KERN_DEBUG pr_fmt("ERROR: pci_alloc_consistent() FAILED to allocate memory for data buffer! 
Returning\n")); return -ENOMEM; } /* Clear out memory at address */ memset(dev->_audiodata_buf_virt_addr, 0, dev->_audiodata_buf_size); ret = cx25821_openfile_audio(dev, sram_ch); if (ret < 0) return ret; /* Creating RISC programs */ ret = cx25821_risc_buffer_upstream_audio(dev, dev->pci, bpl, dev->_audio_lines_count); if (ret < 0) { printk(KERN_DEBUG pr_fmt("ERROR creating audio upstream RISC programs!\n")); goto error; } return 0; error: return ret; } int cx25821_audio_upstream_irq(struct cx25821_dev *dev, int chan_num, u32 status) { int i = 0; u32 int_msk_tmp; struct sram_channel *channel = dev->channels[chan_num].sram_channels; dma_addr_t risc_phys_jump_addr; __le32 *rp; if (status & FLD_AUD_SRC_RISCI1) { /* Get interrupt_index of the program that interrupted */ u32 prog_cnt = cx_read(channel->gpcnt); /* Since we've identified our IRQ, clear our bits from the * interrupt mask and interrupt status registers */ cx_write(channel->int_msk, 0); cx_write(channel->int_stat, cx_read(channel->int_stat)); spin_lock(&dev->slock); while (prog_cnt != dev->_last_index_irq) { /* Update _last_index_irq */ if (dev->_last_index_irq < (NUMBER_OF_PROGRAMS - 1)) dev->_last_index_irq++; else dev->_last_index_irq = 0; dev->_audioframe_index = dev->_last_index_irq; queue_work(dev->_irq_audio_queues, &dev->_audio_work_entry); } if (dev->_is_first_audio_frame) { dev->_is_first_audio_frame = 0; if (dev->_risc_virt_start_addr != NULL) { risc_phys_jump_addr = dev->_risc_phys_start_addr + RISC_SYNC_INSTRUCTION_SIZE + AUDIO_RISC_DMA_BUF_SIZE; rp = cx25821_risc_field_upstream_audio(dev, dev->_risc_virt_start_addr + 1, dev->_audiodata_buf_phys_addr, AUDIO_LINE_SIZE, FIFO_DISABLE); if (USE_RISC_NOOP_AUDIO) { for (i = 0; i < NUM_NO_OPS; i++) { *(rp++) = cpu_to_le32(RISC_NOOP); } } /* Jump to 2nd Audio Frame */ *(rp++) = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_RESET); *(rp++) = cpu_to_le32(risc_phys_jump_addr); *(rp++) = cpu_to_le32(0); } } spin_unlock(&dev->slock); } else { if (status & 
FLD_AUD_SRC_OF) pr_warn("%s(): Audio Received Overflow Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_SYNC) pr_warn("%s(): Audio Received Sync Error Interrupt!\n", __func__); if (status & FLD_AUD_SRC_OPC_ERR) pr_warn("%s(): Audio Received OpCode Error Interrupt!\n", __func__); /* Read and write back the interrupt status register to clear * our bits */ cx_write(channel->int_stat, cx_read(channel->int_stat)); } if (dev->_audiofile_status == END_OF_FILE) { pr_warn("EOF Channel Audio Framecount = %d\n", dev->_audioframe_count); return -1; } /* ElSE, set the interrupt mask register, re-enable irq. */ int_msk_tmp = cx_read(channel->int_msk); cx_write(channel->int_msk, int_msk_tmp |= _intr_msk); return 0; } static irqreturn_t cx25821_upstream_irq_audio(int irq, void *dev_id) { struct cx25821_dev *dev = dev_id; u32 msk_stat, audio_status; int handled = 0; struct sram_channel *sram_ch; if (!dev) return -1; sram_ch = dev->channels[dev->_audio_upstream_channel].sram_channels; msk_stat = cx_read(sram_ch->int_mstat); audio_status = cx_read(sram_ch->int_stat); /* Only deal with our interrupt */ if (audio_status) { handled = cx25821_audio_upstream_irq(dev, dev->_audio_upstream_channel, audio_status); } if (handled < 0) cx25821_stop_upstream_audio(dev); else handled += handled; return IRQ_RETVAL(handled); } static void cx25821_wait_fifo_enable(struct cx25821_dev *dev, struct sram_channel *sram_ch) { int count = 0; u32 tmp; do { /* Wait 10 microsecond before checking to see if the FIFO is * turned ON. */ udelay(10); tmp = cx_read(sram_ch->dma_ctl); /* 10 millisecond timeout */ if (count++ > 1000) { pr_err("ERROR: %s() fifo is NOT turned on. Timeout!\n", __func__); return; } } while (!(tmp & sram_ch->fld_aud_fifo_en)); } int cx25821_start_audio_dma_upstream(struct cx25821_dev *dev, struct sram_channel *sram_ch) { u32 tmp = 0; int err = 0; /* Set the physical start address of the RISC program in the initial * program counter(IPC) member of the CMDS. 
*/ cx_write(sram_ch->cmds_start + 0, dev->_risc_phys_addr); /* Risc IPC High 64 bits 63-32 */ cx_write(sram_ch->cmds_start + 4, 0); /* reset counter */ cx_write(sram_ch->gpcnt_ctl, 3); /* Set the line length (It looks like we do not need to set the * line length) */ cx_write(sram_ch->aud_length, AUDIO_LINE_SIZE & FLD_AUD_DST_LN_LNGTH); /* Set the input mode to 16-bit */ tmp = cx_read(sram_ch->aud_cfg); tmp |= FLD_AUD_SRC_ENABLE | FLD_AUD_DST_PK_MODE | FLD_AUD_CLK_ENABLE | FLD_AUD_MASTER_MODE | FLD_AUD_CLK_SELECT_PLL_D | FLD_AUD_SONY_MODE; cx_write(sram_ch->aud_cfg, tmp); /* Read and write back the interrupt status register to clear it */ tmp = cx_read(sram_ch->int_stat); cx_write(sram_ch->int_stat, tmp); /* Clear our bits from the interrupt status register. */ cx_write(sram_ch->int_stat, _intr_msk); /* Set the interrupt mask register, enable irq. */ cx_set(PCI_INT_MSK, cx_read(PCI_INT_MSK) | (1 << sram_ch->irq_bit)); tmp = cx_read(sram_ch->int_msk); cx_write(sram_ch->int_msk, tmp |= _intr_msk); err = request_irq(dev->pci->irq, cx25821_upstream_irq_audio, IRQF_SHARED, dev->name, dev); if (err < 0) { pr_err("%s: can't get upstream IRQ %d\n", dev->name, dev->pci->irq); goto fail_irq; } /* Start the DMA engine */ tmp = cx_read(sram_ch->dma_ctl); cx_set(sram_ch->dma_ctl, tmp | sram_ch->fld_aud_risc_en); dev->_audio_is_running = 1; dev->_is_first_audio_frame = 1; /* The fifo_en bit turns on by the first Risc program */ cx25821_wait_fifo_enable(dev, sram_ch); return 0; fail_irq: cx25821_dev_unregister(dev); return err; } int cx25821_audio_upstream_init(struct cx25821_dev *dev, int channel_select) { struct sram_channel *sram_ch; int retval = 0; int err = 0; int str_length = 0; if (dev->_audio_is_running) { pr_warn("Audio Channel is still running so return!\n"); return 0; } dev->_audio_upstream_channel = channel_select; sram_ch = dev->channels[channel_select].sram_channels; /* Work queue */ INIT_WORK(&dev->_audio_work_entry, cx25821_audioups_handler); dev->_irq_audio_queues 
= create_singlethread_workqueue("cx25821_audioworkqueue"); if (!dev->_irq_audio_queues) { printk(KERN_DEBUG pr_fmt("ERROR: create_singlethread_workqueue() for Audio FAILED!\n")); return -ENOMEM; } dev->_last_index_irq = 0; dev->_audio_is_running = 0; dev->_audioframe_count = 0; dev->_audiofile_status = RESET_STATUS; dev->_audio_lines_count = LINES_PER_AUDIO_BUFFER; _line_size = AUDIO_LINE_SIZE; if (dev->input_audiofilename) { str_length = strlen(dev->input_audiofilename); dev->_audiofilename = kmemdup(dev->input_audiofilename, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; /* Default if filename is empty string */ if (strcmp(dev->input_audiofilename, "") == 0) dev->_audiofilename = "/root/audioGOOD.wav"; } else { str_length = strlen(_defaultAudioName); dev->_audiofilename = kmemdup(_defaultAudioName, str_length + 1, GFP_KERNEL); if (!dev->_audiofilename) goto error; } retval = cx25821_sram_channel_setup_upstream_audio(dev, sram_ch, _line_size, 0); dev->audio_upstream_riscbuf_size = AUDIO_RISC_DMA_BUF_SIZE * NUM_AUDIO_PROGS + RISC_SYNC_INSTRUCTION_SIZE; dev->audio_upstream_databuf_size = AUDIO_DATA_BUF_SZ * NUM_AUDIO_PROGS; /* Allocating buffers and prepare RISC program */ retval = cx25821_audio_upstream_buffer_prepare(dev, sram_ch, _line_size); if (retval < 0) { pr_err("%s: Failed to set up Audio upstream buffers!\n", dev->name); goto error; } /* Start RISC engine */ cx25821_start_audio_dma_upstream(dev, sram_ch); return 0; error: cx25821_dev_unregister(dev); return err; }
gpl-2.0
ChronoMonochrome/android_kernel_ste-3.4
drivers/mtd/maps/ixp4xx.c
5158
6517
/* * drivers/mtd/maps/ixp4xx.c * * MTD Map file for IXP4XX based systems. Please do not make per-board * changes in here. If your board needs special setup, do it in your * platform level code in arch/arm/mach-ixp4xx/board-setup.c * * Original Author: Intel Corporation * Maintainer: Deepak Saxena <dsaxena@mvista.com> * * Copyright (C) 2002 Intel Corporation * Copyright (C) 2003-2004 MontaVista Software, Inc. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <asm/mach/flash.h> #include <linux/reboot.h> /* * Read/write a 16 bit word from flash address 'addr'. * * When the cpu is in little-endian mode it swizzles the address lines * ('address coherency') so we need to undo the swizzling to ensure commands * and the like end up on the correct flash address. * * To further complicate matters, due to the way the expansion bus controller * handles 32 bit reads, the byte stream ABCD is stored on the flash as: * D15 D0 * +---+---+ * | A | B | 0 * +---+---+ * | C | D | 2 * +---+---+ * This means that on LE systems each 16 bit word must be swapped. Note that * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI * data and other flash commands which are always in D7-D0. 
*/

#ifndef __ARMEB__
#ifndef CONFIG_MTD_CFI_BE_BYTE_SWAP
# error CONFIG_MTD_CFI_BE_BYTE_SWAP required
#endif

/*
 * LE CPU: undo the expansion-bus address swizzle (XOR with 0x2) and
 * byte-swap the 16-bit data, per the layout diagram above.
 */
static inline u16 flash_read16(void __iomem *addr)
{
	return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
}

static inline void flash_write16(u16 d, void __iomem *addr)
{
	__raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
}

/*
 * BYTE0/BYTE1 extract the first/second byte of the flash byte stream
 * from a 16-bit word as returned by flash_read16().
 */
#define	BYTE0(h)	((h) & 0xFF)
#define	BYTE1(h)	(((h) >> 8) & 0xFF)
#else
/* BE CPU: data already matches the flash layout; plain 16-bit access. */
static inline u16 flash_read16(const void __iomem *addr)
{
	return __raw_readw(addr);
}

static inline void flash_write16(u16 d, void __iomem *addr)
{
	__raw_writew(d, addr);
}

#define	BYTE0(h)	(((h) >> 8) & 0xFF)
#define	BYTE1(h)	((h) & 0xFF)
#endif

/* map_info.read hook: one 16-bit read at offset 'ofs'. */
static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
{
	map_word val;

	val.x[0] = flash_read16(map->virt + ofs);
	return val;
}

/*
 * The IXP4xx expansion bus only allows 16-bit wide acceses
 * when attached to a 16-bit wide device (such as the 28F128J3A),
 * so we can't just memcpy_fromio().
 */
static void ixp4xx_copy_from(struct map_info *map, void *to,
			     unsigned long from, ssize_t len)
{
	u8 *dest = (u8 *) to;
	void __iomem *src = map->virt + from;

	if (len <= 0)
		return;

	/* Leading odd byte: read the containing aligned word. */
	if (from & 1) {
		*dest++ = BYTE1(flash_read16(src-1));
		src++;
		--len;
	}

	while (len >= 2) {
		u16 data = flash_read16(src);
		*dest++ = BYTE0(data);
		*dest++ = BYTE1(data);
		src += 2;
		len -= 2;
	}

	/* Trailing odd byte. */
	if (len > 0)
		*dest++ = BYTE0(flash_read16(src));
}

/*
 * Unaligned writes are ignored, causing the 8-bit
 * probe to fail and proceed to the 16-bit probe (which succeeds).
 */
static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
{
	if (!(adr & 1))
		flash_write16(d.x[0], map->virt + adr);
}

/*
 * Fast write16 function without the probing check above
 */
static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
{
	flash_write16(d.x[0], map->virt + adr);
}

/* Per-device state, stored as platform drvdata. */
struct ixp4xx_flash_info {
	struct mtd_info *mtd;
	struct map_info map;
	struct resource *res;
};

/* Partition parsers tried in order by mtd_device_parse_register(). */
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };

/*
 * Tear down everything probe set up.  Also called from the probe error
 * path, so every step is guarded against partial initialization.
 */
static int ixp4xx_flash_remove(struct platform_device *dev)
{
	struct flash_platform_data *plat = dev->dev.platform_data;
	struct ixp4xx_flash_info *info = platform_get_drvdata(dev);

	platform_set_drvdata(dev, NULL);

	if(!info)
		return 0;

	if (info->mtd) {
		mtd_device_unregister(info->mtd);
		map_destroy(info->mtd);
	}
	if (info->map.virt)
		iounmap(info->map.virt);

	if (info->res) {
		release_resource(info->res);
		kfree(info->res);
	}

	/* Board-specific teardown, if the platform supplied one. */
	if (plat->exit)
		plat->exit();

	return 0;
}

static int ixp4xx_flash_probe(struct platform_device *dev)
{
	struct flash_platform_data *plat = dev->dev.platform_data;
	struct ixp4xx_flash_info *info;
	struct mtd_part_parser_data ppdata = {
		.origin = dev->resource->start,
	};
	int err = -1;

	if (!plat)
		return -ENODEV;

	if (plat->init) {
		err = plat->init();
		if (err)
			return err;
	}

	info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
	if(!info) {
		err = -ENOMEM;
		goto Error;
	}

	platform_set_drvdata(dev, info);

	/*
	 * Tell the MTD layer we're not 1:1 mapped so that it does
	 * not attempt to do a direct access on us.
	 */
	info->map.phys = NO_XIP;
	info->map.size = resource_size(dev->resource);

	/*
	 * We only support 16-bit accesses for now. If and when
	 * any board use 8-bit access, we'll fixup the driver to
	 * handle that.
	 */
	info->map.bankwidth = 2;
	info->map.name = dev_name(&dev->dev);
	info->map.read = ixp4xx_read16;
	/* Probe with the unaligned-write-filtering variant first. */
	info->map.write = ixp4xx_probe_write16;
	info->map.copy_from = ixp4xx_copy_from;

	info->res = request_mem_region(dev->resource->start,
				       resource_size(dev->resource),
				       "IXP4XXFlash");
	if (!info->res) {
		printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
		err = -ENOMEM;
		goto Error;
	}

	info->map.virt = ioremap(dev->resource->start,
				 resource_size(dev->resource));
	if (!info->map.virt) {
		printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
		err = -EIO;
		goto Error;
	}

	info->mtd = do_map_probe(plat->map_name, &info->map);
	if (!info->mtd) {
		printk(KERN_ERR "IXP4XXFlash: map_probe failed\n");
		err = -ENXIO;
		goto Error;
	}
	info->mtd->owner = THIS_MODULE;

	/* Use the fast version */
	info->map.write = ixp4xx_write16;

	err = mtd_device_parse_register(info->mtd, probes, &ppdata,
					plat->parts, plat->nr_parts);
	if (err) {
		printk(KERN_ERR "Could not parse partitions\n");
		goto Error;
	}

	return 0;

Error:
	ixp4xx_flash_remove(dev);
	return err;
}

static struct platform_driver ixp4xx_flash_driver = {
	.probe		= ixp4xx_flash_probe,
	.remove		= ixp4xx_flash_remove,
	.driver		= {
		.name	= "IXP4XX-Flash",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(ixp4xx_flash_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
MODULE_AUTHOR("Deepak Saxena");
MODULE_ALIAS("platform:IXP4XX-Flash");
gpl-2.0
MyAOSP/kernel_asus_tf201
sound/core/seq/seq_compat.c
13094
4715
/*
 * 32bit -> 64bit ioctl wrapper for sequencer API
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

/* This file included from seq.c */

#include <linux/compat.h>
#include <linux/slab.h>

/*
 * 32-bit ABI layout of struct snd_seq_port_info.  It differs from the
 * native layout only from 'kernel' onwards ('kernel' is a pointer in
 * the native struct but a u32 here), so 'flags' and 'time_queue' sit
 * at different offsets and need per-field fixups below.
 */
struct snd_seq_port_info32 {
	struct snd_seq_addr addr;	/* client/port numbers */
	char name[64];			/* port name */

	u32 capability;			/* port capability bits */
	u32 type;			/* port type bits */
	s32 midi_channels;		/* channels per MIDI port */
	s32 midi_voices;		/* voices per MIDI port */
	s32 synth_voices;		/* voices per SYNTH port */

	s32 read_use;			/* R/O: subscribers for output (from this port) */
	s32 write_use;			/* R/O: subscribers for input (to this port) */

	u32 kernel;			/* reserved for kernel use (must be NULL) */
	u32 flags;			/* misc. conditioning */
	unsigned char time_queue;	/* queue # for timestamping */
	char reserved[59];		/* for future use */
};

/*
 * Translate a 32-bit port-info ioctl: copy the user struct into a
 * native struct snd_seq_port_info, run the native handler, and copy
 * the result back with the mismatching fields fixed up.
 *
 * Returns 0 on success or a negative errno.
 */
static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned int cmd,
					struct snd_seq_port_info32 __user *data32)
{
	int err = -EFAULT;
	struct snd_seq_port_info *data;
	mm_segment_t fs;

	/*
	 * Allocate the full-sized NATIVE structure.  The previous code
	 * used memdup_user(data32, sizeof(*data32)), which under-allocates:
	 * the native struct is larger than the 32-bit layout (pointer-sized
	 * 'kernel' member), so the data->kernel / data->flags /
	 * data->time_queue stores below wrote past the end of the
	 * allocation — a heap out-of-bounds write reachable from 32-bit
	 * userspace.
	 */
	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * The fields up to and including write_use share the same layout;
	 * copy them in bulk, then fix up the fields that moved.
	 */
	if (copy_from_user(data, data32, sizeof(*data32)) ||
	    get_user(data->flags, &data32->flags) ||
	    get_user(data->time_queue, &data32->time_queue))
		goto error;
	data->kernel = NULL;	/* userspace must never supply a kernel pointer */

	fs = snd_enter_user();
	err = snd_seq_do_ioctl(client, cmd, data);
	snd_leave_user(fs);

	if (err < 0)
		goto error;

	/* Copy the result back, again fixing up the relocated fields. */
	if (copy_to_user(data32, data, sizeof(*data32)) ||
	    put_user(data->flags, &data32->flags) ||
	    put_user(data->time_queue, &data32->time_queue))
		err = -EFAULT;

 error:
	kfree(data);
	return err;
}

/* 32-bit encodings of the port-info ioctls (struct size differs). */
enum {
	SNDRV_SEQ_IOCTL_CREATE_PORT32 = _IOWR('S', 0x20, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_DELETE_PORT32 = _IOW ('S', 0x21, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_GET_PORT_INFO32 = _IOWR('S', 0x22, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_SET_PORT_INFO32 = _IOW ('S', 0x23, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32 = _IOWR('S', 0x52, struct snd_seq_port_info32),
};

/*
 * compat_ioctl entry point: most sequencer ioctls use layouts that are
 * identical in both ABIs and pass straight through; only the port-info
 * family needs translation.
 */
static long snd_seq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	void __user *argp = compat_ptr(arg);

	if (snd_BUG_ON(!client))
		return -ENXIO;

	switch (cmd) {
	case SNDRV_SEQ_IOCTL_PVERSION:
	case SNDRV_SEQ_IOCTL_CLIENT_ID:
	case SNDRV_SEQ_IOCTL_SYSTEM_INFO:
	case SNDRV_SEQ_IOCTL_GET_CLIENT_INFO:
	case SNDRV_SEQ_IOCTL_SET_CLIENT_INFO:
	case SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT:
	case SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT:
	case SNDRV_SEQ_IOCTL_CREATE_QUEUE:
	case SNDRV_SEQ_IOCTL_DELETE_QUEUE:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_INFO:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_INFO:
	case SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT:
	case SNDRV_SEQ_IOCTL_GET_CLIENT_POOL:
	case SNDRV_SEQ_IOCTL_SET_CLIENT_POOL:
	case SNDRV_SEQ_IOCTL_REMOVE_EVENTS:
	case SNDRV_SEQ_IOCTL_QUERY_SUBS:
	case SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION:
	case SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT:
	case SNDRV_SEQ_IOCTL_RUNNING_MODE:
		/* ABI-compatible layouts: pass through unchanged. */
		return snd_seq_do_ioctl(client, cmd, argp);
	case SNDRV_SEQ_IOCTL_CREATE_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, argp);
	case SNDRV_SEQ_IOCTL_DELETE_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, argp);
	case SNDRV_SEQ_IOCTL_GET_PORT_INFO32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_GET_PORT_INFO, argp);
	case SNDRV_SEQ_IOCTL_SET_PORT_INFO32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_SET_PORT_INFO, argp);
	case SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, argp);
	}
	return -ENOIOCTLCMD;
}
gpl-2.0
lnfamous/Kernel_Stock_Pico
drivers/media/dvb/firewire/firedtv-fw.c
295
7904
/*
 * FireDTV driver -- firewire I/O backend
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/page.h>

#include <dvb_demux.h>

#include "firedtv.h"

/* All bound FireDTV units, so the FCP response handler can find them. */
static LIST_HEAD(node_list);
static DEFINE_SPINLOCK(node_list_lock);

static inline struct fw_device *device_of(struct firedtv *fdtv)
{
	return fw_device(fdtv->device->parent);
}

/* Run one blocking FireWire transaction against the unit's node. */
static int node_req(struct firedtv *fdtv, u64 addr, void *data, size_t len,
		    int tcode)
{
	struct fw_device *device = device_of(fdtv);
	int rcode, generation = device->generation;

	smp_rmb(); /* node_id vs. generation */

	rcode = fw_run_transaction(device->card, tcode, device->node_id,
				   generation, device->max_speed,
				   addr, data, len);

	return rcode != RCODE_COMPLETE ? -EIO : 0;
}

static int node_lock(struct firedtv *fdtv, u64 addr, void *data)
{
	return node_req(fdtv, addr, data, 8, TCODE_LOCK_COMPARE_SWAP);
}

static int node_read(struct firedtv *fdtv, u64 addr, void *data)
{
	return node_req(fdtv, addr, data, 4, TCODE_READ_QUADLET_REQUEST);
}

static int node_write(struct firedtv *fdtv, u64 addr, void *data, size_t len)
{
	return node_req(fdtv, addr, data, len, TCODE_WRITE_BLOCK_REQUEST);
}

#define ISO_HEADER_SIZE			4
#define CIP_HEADER_SIZE			8
#define MPEG2_TS_HEADER_SIZE		4
#define MPEG2_TS_SOURCE_PACKET_SIZE	(4 + 188)

#define MAX_PACKET_SIZE		1024  /* 776, rounded up to 2^n */
#define PACKETS_PER_PAGE	(PAGE_SIZE / MAX_PACKET_SIZE)
#define N_PACKETS		64    /* buffer size */
#define N_PAGES			DIV_ROUND_UP(N_PACKETS, PACKETS_PER_PAGE)
#define IRQ_INTERVAL		16

/* Isochronous reception state for one tuner. */
struct firedtv_receive_context {
	struct fw_iso_context *context;
	struct fw_iso_buffer buffer;
	int interrupt_packet;	/* running packet count, used to space IRQs */
	int current_packet;	/* next buffer slot the hardware will fill */
	char *pages[N_PAGES];	/* kernel addresses of the DMA buffer pages */
};

/* (Re)queue buffer slot 'index' for isochronous reception. */
static int queue_iso(struct firedtv_receive_context *ctx, int index)
{
	struct fw_iso_packet p;

	p.payload_length = MAX_PACKET_SIZE;
	/* Request a completion interrupt only every IRQ_INTERVAL packets. */
	p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1));
	p.skip = 0;
	p.header_length = ISO_HEADER_SIZE;

	return fw_iso_context_queue(ctx->context, &p, &ctx->buffer,
				    index * MAX_PACKET_SIZE);
}

/*
 * Iso completion callback: for each completed packet, feed the contained
 * MPEG2-TS source packets to the DVB demux and requeue the slot.
 */
static void handle_iso(struct fw_iso_context *context, u32 cycle,
		       size_t header_length, void *header, void *data)
{
	struct firedtv *fdtv = data;
	struct firedtv_receive_context *ctx = fdtv->backend_data;
	__be32 *h, *h_end;
	int length, err, i = ctx->current_packet;
	char *p, *p_end;

	for (h = header, h_end = h + header_length / 4; h < h_end; h++) {
		/* Upper 16 bits of the iso header hold the data length. */
		length = be32_to_cpup(h) >> 16;
		if (unlikely(length > MAX_PACKET_SIZE)) {
			dev_err(fdtv->device, "length = %d\n", length);
			length = MAX_PACKET_SIZE;
		}

		p = ctx->pages[i / PACKETS_PER_PAGE]
				+ (i % PACKETS_PER_PAGE) * MAX_PACKET_SIZE;
		p_end = p + length;

		/* Skip CIP + TS headers, then filter each source packet. */
		for (p += CIP_HEADER_SIZE + MPEG2_TS_HEADER_SIZE; p < p_end;
		     p += MPEG2_TS_SOURCE_PACKET_SIZE)
			dvb_dmx_swfilter_packets(&fdtv->demux, p, 1);

		err = queue_iso(ctx, i);
		if (unlikely(err))
			dev_err(fdtv->device, "requeue failed\n");

		i = (i + 1) & (N_PACKETS - 1);
	}
	ctx->current_packet = i;
}

/* Allocate DMA buffers and start isochronous reception. */
static int start_iso(struct firedtv *fdtv)
{
	struct firedtv_receive_context *ctx;
	struct fw_device *device = device_of(fdtv);
	int i, err;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->context = fw_iso_context_create(device->card,
			FW_ISO_CONTEXT_RECEIVE, fdtv->isochannel,
			device->max_speed, ISO_HEADER_SIZE, handle_iso, fdtv);
	if (IS_ERR(ctx->context)) {
		err = PTR_ERR(ctx->context);
		goto fail_free;
	}

	err = fw_iso_buffer_init(&ctx->buffer, device->card,
				 N_PAGES, DMA_FROM_DEVICE);
	if (err)
		goto fail_context_destroy;

	ctx->interrupt_packet = 0;
	ctx->current_packet = 0;

	for (i = 0; i < N_PAGES; i++)
		ctx->pages[i] = page_address(ctx->buffer.pages[i]);

	/* Prime every buffer slot before starting the context. */
	for (i = 0; i < N_PACKETS; i++) {
		err = queue_iso(ctx, i);
		if (err)
			goto fail;
	}

	err = fw_iso_context_start(ctx->context, -1, 0,
				   FW_ISO_CONTEXT_MATCH_ALL_TAGS);
	if (err)
		goto fail;

	fdtv->backend_data = ctx;

	return 0;
fail:
	fw_iso_buffer_destroy(&ctx->buffer, device->card);
fail_context_destroy:
	fw_iso_context_destroy(ctx->context);
fail_free:
	kfree(ctx);

	return err;
}

/* Stop reception and free everything start_iso() set up. */
static void stop_iso(struct firedtv *fdtv)
{
	struct firedtv_receive_context *ctx = fdtv->backend_data;

	fw_iso_context_stop(ctx->context);
	fw_iso_buffer_destroy(&ctx->buffer, device_of(fdtv)->card);
	fw_iso_context_destroy(ctx->context);
	kfree(ctx);
}

static const struct firedtv_backend backend = {
	.lock		= node_lock,
	.read		= node_read,
	.write		= node_write,
	.start_iso	= start_iso,
	.stop_iso	= stop_iso,
};

/*
 * FCP response handler: match the responding node (card, node_id,
 * generation) and AV/C subunit against our unit list and hand the
 * payload to the AV/C layer.
 */
static void handle_fcp(struct fw_card *card, struct fw_request *request,
		       int tcode, int destination, int source, int generation,
		       unsigned long long offset, void *payload, size_t length,
		       void *callback_data)
{
	struct firedtv *f, *fdtv = NULL;
	struct fw_device *device;
	unsigned long flags;
	int su;

	/* Only AV/C response frames (ctype field 0x0?) are of interest. */
	if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0)
		return;

	su = ((u8 *)payload)[1] & 0x7;

	spin_lock_irqsave(&node_list_lock, flags);
	list_for_each_entry(f, &node_list, list) {
		device = device_of(f);
		if (device->generation != generation)
			continue;

		smp_rmb(); /* node_id vs. generation */

		if (device->card == card &&
		    device->node_id == source &&
		    (f->subunit == su || (f->subunit == 0 && su == 0x7))) {
			fdtv = f;
			break;
		}
	}
	spin_unlock_irqrestore(&node_list_lock, flags);

	if (fdtv)
		avc_recv(fdtv, payload, length);
}

static struct fw_address_handler fcp_handler = {
	.length           = CSR_FCP_END - CSR_FCP_RESPONSE,
	.address_callback = handle_fcp,
};

static const struct fw_address_region fcp_region = {
	.start	= CSR_REGISTER_BASE + CSR_FCP_RESPONSE,
	.end	= CSR_REGISTER_BASE + CSR_FCP_END,
};

/* Adjust the template string if models with longer names appear. */
#define MAX_MODEL_NAME_LEN	sizeof("FireDTV ????")

static int node_probe(struct device *dev)
{
	struct firedtv *fdtv;
	char name[MAX_MODEL_NAME_LEN];
	int name_len, err;

	name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
				 name, sizeof(name));

	fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
	if (!fdtv)
		return -ENOMEM;

	err = fdtv_register_rc(fdtv, dev);
	if (err)
		goto fail_free;

	spin_lock_irq(&node_list_lock);
	list_add_tail(&fdtv->list, &node_list);
	spin_unlock_irq(&node_list_lock);

	err = avc_identify_subunit(fdtv);
	if (err)
		goto fail;

	err = fdtv_dvb_register(fdtv);
	if (err)
		goto fail;

	avc_register_remote_control(fdtv);

	return 0;
fail:
	spin_lock_irq(&node_list_lock);
	list_del(&fdtv->list);
	spin_unlock_irq(&node_list_lock);
	fdtv_unregister_rc(fdtv);
fail_free:
	kfree(fdtv);

	return err;
}

static int node_remove(struct device *dev)
{
	struct firedtv *fdtv = dev_get_drvdata(dev);

	fdtv_dvb_unregister(fdtv);

	spin_lock_irq(&node_list_lock);
	list_del(&fdtv->list);
	spin_unlock_irq(&node_list_lock);

	fdtv_unregister_rc(fdtv);

	kfree(fdtv);
	return 0;
}

/* Bus-reset update: re-establish the plug connection if streaming. */
static void node_update(struct fw_unit *unit)
{
	struct firedtv *fdtv = dev_get_drvdata(&unit->device);

	if (fdtv->isochannel >= 0)
		cmp_establish_pp_connection(fdtv, fdtv->subunit,
					    fdtv->isochannel);
}

static struct fw_driver fdtv_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = "firedtv",
		.bus    = &fw_bus_type,
		.probe  = node_probe,
		.remove = node_remove,
	},
	.update   = node_update,
	.id_table = fdtv_id_table,
};

int __init fdtv_fw_init(void)
{
	int ret;

	ret = fw_core_add_address_handler(&fcp_handler, &fcp_region);
	if (ret < 0)
		return ret;

	return driver_register(&fdtv_driver.driver);
}

void fdtv_fw_exit(void)
{
	driver_unregister(&fdtv_driver.driver);
	fw_core_remove_address_handler(&fcp_handler);
}
gpl-2.0
longman88/kernel-qspinlock-v10
drivers/reset/reset-sunxi.c
295
4138
/*
 * Allwinner SoCs Reset Controller driver
 *
 * Copyright 2013 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Driver state for one bank of reset registers.
 *
 * @lock:    serializes the read-modify-write cycles on the registers
 * @membase: ioremap()ed register window
 * @res:     the underlying MMIO resource, kept so remove() can release it
 * @rcdev:   handle registered with the reset controller framework
 */
struct sunxi_reset_data {
	spinlock_t			lock;
	void __iomem			*membase;
	struct resource			res;
	struct reset_controller_dev	rcdev;
};

/*
 * Assert a reset line.  The lines are active low: asserting the reset
 * means clearing the corresponding register bit.
 */
static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct sunxi_reset_data *data = container_of(rcdev,
						     struct sunxi_reset_data,
						     rcdev);
	/*
	 * NOTE(review): registers are 32-bit, so this mapping is only
	 * correct where BITS_PER_LONG == 32 (true on the ARM32 SoCs this
	 * driver targets) — confirm before reusing on a 64-bit platform.
	 */
	int bank = id / BITS_PER_LONG;
	int offset = id % BITS_PER_LONG;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&data->lock, flags);

	reg = readl(data->membase + (bank * 4));
	writel(reg & ~BIT(offset), data->membase + (bank * 4));

	spin_unlock_irqrestore(&data->lock, flags);

	return 0;
}

/* Release (deassert) a reset line by setting its register bit. */
static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
				unsigned long id)
{
	struct sunxi_reset_data *data = container_of(rcdev,
						     struct sunxi_reset_data,
						     rcdev);
	int bank = id / BITS_PER_LONG;
	int offset = id % BITS_PER_LONG;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&data->lock, flags);

	reg = readl(data->membase + (bank * 4));
	writel(reg | BIT(offset), data->membase + (bank * 4));

	spin_unlock_irqrestore(&data->lock, flags);

	return 0;
}

static struct reset_control_ops sunxi_reset_ops = {
	.assert		= sunxi_reset_assert,
	.deassert	= sunxi_reset_deassert,
};

/*
 * Map the register bank described by @np and register it with the reset
 * framework.  On success returns the driver data (so callers can stash
 * it as drvdata) and stores 0 in *errp; on failure returns NULL and
 * stores a negative errno in *errp.
 */
static struct sunxi_reset_data *sunxi_reset_setup(struct device_node *np,
						  int *errp)
{
	struct sunxi_reset_data *data;
	resource_size_t size;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = of_address_to_resource(np, 0, &data->res);
	if (ret)
		goto err_free;

	size = resource_size(&data->res);
	if (!request_mem_region(data->res.start, size, np->name)) {
		ret = -EBUSY;
		goto err_free;
	}

	data->membase = ioremap(data->res.start, size);
	if (!data->membase) {
		ret = -ENOMEM;
		/* was leaked before: release the region we just claimed */
		goto err_release;
	}

	/* was missing before: required for correct lock debugging/init */
	spin_lock_init(&data->lock);

	data->rcdev.owner = THIS_MODULE;
	/*
	 * One reset line per register bit, i.e. 8 lines per byte of the
	 * window (previously size * 32, which overstated the count 4x and
	 * let out-of-range ids address registers beyond the mapping).
	 */
	data->rcdev.nr_resets = size * 8;
	data->rcdev.ops = &sunxi_reset_ops;
	data->rcdev.of_node = np;
	reset_controller_register(&data->rcdev);

	*errp = 0;
	return data;

err_release:
	release_mem_region(data->res.start, size);
err_free:
	kfree(data);
err_out:
	*errp = ret;
	return NULL;
}

/* Early-init entry used before the driver model is available. */
static int sunxi_reset_init(struct device_node *np)
{
	int err;

	sunxi_reset_setup(np, &err);
	return err;
}

/*
 * These are the reset controller we need to initialize early on in
 * our system, before we can even think of using a regular device
 * driver for it.
 */
static const struct of_device_id sunxi_early_reset_dt_ids[] __initdata = {
	{ .compatible = "allwinner,sun6i-a31-ahb1-reset", },
	{ /* sentinel */ },
};

void __init sun6i_reset_init(void)
{
	struct device_node *np;

	for_each_matching_node(np, sunxi_early_reset_dt_ids)
		sunxi_reset_init(np);
}

/*
 * And these are the controllers we can register through the regular
 * device model.
 */
static const struct of_device_id sunxi_reset_dt_ids[] = {
	{ .compatible = "allwinner,sun6i-a31-clock-reset", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);

static int sunxi_reset_probe(struct platform_device *pdev)
{
	struct sunxi_reset_data *data;
	int err;

	data = sunxi_reset_setup(pdev->dev.of_node, &err);
	if (!data)
		return err;

	/*
	 * Store the driver data so sunxi_reset_remove() can find it.
	 * Previously this was never set, so remove() dereferenced a NULL
	 * drvdata pointer.
	 */
	platform_set_drvdata(pdev, data);

	return 0;
}

static int sunxi_reset_remove(struct platform_device *pdev)
{
	struct sunxi_reset_data *data = platform_get_drvdata(pdev);

	reset_controller_unregister(&data->rcdev);
	iounmap(data->membase);
	/* was missing before: give the MMIO region back */
	release_mem_region(data->res.start, resource_size(&data->res));
	kfree(data);

	return 0;
}

static struct platform_driver sunxi_reset_driver = {
	.probe	= sunxi_reset_probe,
	.remove	= sunxi_reset_remove,
	.driver = {
		.name		= "sunxi-reset",
		.owner		= THIS_MODULE,
		.of_match_table	= sunxi_reset_dt_ids,
	},
};
module_platform_driver(sunxi_reset_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
MODULE_LICENSE("GPL");
gpl-2.0
Jaykay-x/Mini2440_BH1750fvi
drivers/regulator/twl4030-regulator.c
551
13177
/* * twl4030-regulator.c -- support regulators in twl4030 family chips * * Copyright (C) 2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/i2c/twl4030.h> /* * The TWL4030/TW5030/TPS659x0 family chips include power management, a * USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions * include an audio codec, battery charger, and more voltage regulators. * These chips are often used in OMAP-based systems. * * This driver implements software-based resource control for various * voltage regulators. This is usually augmented with state machine * based control. */ struct twlreg_info { /* start of regulator's PM_RECEIVER control register bank */ u8 base; /* twl4030 resource ID, for resource control state machine */ u8 id; /* voltage in mV = table[VSEL]; table_len must be a power-of-two */ u8 table_len; const u16 *table; /* chip constraints on regulator behavior */ u16 min_mV; /* used by regulator core */ struct regulator_desc desc; }; /* LDO control registers ... offset is from the base of its register bank. * The first three registers of all power resource banks help hardware to * manage the various resource groups. */ #define VREG_GRP 0 #define VREG_TYPE 1 #define VREG_REMAP 2 #define VREG_DEDICATED 3 /* LDO control */ static inline int twl4030reg_read(struct twlreg_info *info, unsigned offset) { u8 value; int status; status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &value, info->base + offset); return (status < 0) ? 
status : value; } static inline int twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value) { return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, value, info->base + offset); } /*----------------------------------------------------------------------*/ /* generic power resource operations, which work on all regulators */ static int twl4030reg_grp(struct regulator_dev *rdev) { return twl4030reg_read(rdev_get_drvdata(rdev), VREG_GRP); } /* * Enable/disable regulators by joining/leaving the P1 (processor) group. * We assume nobody else is updating the DEV_GRP registers. */ #define P3_GRP BIT(7) /* "peripherals" */ #define P2_GRP BIT(6) /* secondary processor, modem, etc */ #define P1_GRP BIT(5) /* CPU/Linux */ static int twl4030reg_is_enabled(struct regulator_dev *rdev) { int state = twl4030reg_grp(rdev); if (state < 0) return state; return (state & P1_GRP) != 0; } static int twl4030reg_enable(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); int grp; grp = twl4030reg_read(info, VREG_GRP); if (grp < 0) return grp; grp |= P1_GRP; return twl4030reg_write(info, VREG_GRP, grp); } static int twl4030reg_disable(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); int grp; grp = twl4030reg_read(info, VREG_GRP); if (grp < 0) return grp; grp &= ~P1_GRP; return twl4030reg_write(info, VREG_GRP, grp); } static int twl4030reg_get_status(struct regulator_dev *rdev) { int state = twl4030reg_grp(rdev); if (state < 0) return state; state &= 0x0f; /* assume state != WARM_RESET; we'd not be running... */ if (!state) return REGULATOR_STATUS_OFF; return (state & BIT(3)) ? REGULATOR_STATUS_NORMAL : REGULATOR_STATUS_STANDBY; } static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) { struct twlreg_info *info = rdev_get_drvdata(rdev); unsigned message; int status; /* We can only set the mode through state machine commands... 
*/ switch (mode) { case REGULATOR_MODE_NORMAL: message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_ACTIVE); break; case REGULATOR_MODE_STANDBY: message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_SLEEP); break; default: return -EINVAL; } /* Ensure the resource is associated with some group */ status = twl4030reg_grp(rdev); if (status < 0) return status; if (!(status & (P3_GRP | P2_GRP | P1_GRP))) return -EACCES; status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, message >> 8, 0x15 /* PB_WORD_MSB */ ); if (status >= 0) return status; return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, message, 0x16 /* PB_WORD_LSB */ ); } /*----------------------------------------------------------------------*/ /* * Support for adjustable-voltage LDOs uses a four bit (or less) voltage * select field in its control register. We use tables indexed by VSEL * to record voltages in milliVolts. (Accuracy is about three percent.) * * Note that VSEL values for VAUX2 changed in twl5030 and newer silicon; * currently handled by listing two slightly different VAUX2 regulators, * only one of which will be configured. * * VSEL values documented as "TI cannot support these values" are flagged * in these tables as UNSUP() values; we normally won't assign them. * * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported. * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting. 
*/ #ifdef CONFIG_TWL4030_ALLOW_UNSUPPORTED #define UNSUP_MASK 0x0000 #else #define UNSUP_MASK 0x8000 #endif #define UNSUP(x) (UNSUP_MASK | (x)) #define IS_UNSUP(x) (UNSUP_MASK & (x)) #define LDO_MV(x) (~UNSUP_MASK & (x)) static const u16 VAUX1_VSEL_table[] = { UNSUP(1500), UNSUP(1800), 2500, 2800, 3000, 3000, 3000, 3000, }; static const u16 VAUX2_4030_VSEL_table[] = { UNSUP(1000), UNSUP(1000), UNSUP(1200), 1300, 1500, 1800, UNSUP(1850), 2500, UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), }; static const u16 VAUX2_VSEL_table[] = { 1700, 1700, 1900, 1300, 1500, 1800, 2000, 2500, 2100, 2800, 2200, 2300, 2400, 2400, 2400, 2400, }; static const u16 VAUX3_VSEL_table[] = { 1500, 1800, 2500, 2800, 3000, 3000, 3000, 3000, }; static const u16 VAUX4_VSEL_table[] = { 700, 1000, 1200, UNSUP(1300), 1500, 1800, UNSUP(1850), 2500, UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000), UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), }; static const u16 VMMC1_VSEL_table[] = { 1850, 2850, 3000, 3150, }; static const u16 VMMC2_VSEL_table[] = { UNSUP(1000), UNSUP(1000), UNSUP(1200), UNSUP(1300), UNSUP(1500), UNSUP(1800), 1850, UNSUP(2500), 2600, 2800, 2850, 3000, 3150, 3150, 3150, 3150, }; static const u16 VPLL1_VSEL_table[] = { 1000, 1200, 1300, 1800, UNSUP(2800), UNSUP(3000), UNSUP(3000), UNSUP(3000), }; static const u16 VPLL2_VSEL_table[] = { 700, 1000, 1200, 1300, UNSUP(1500), 1800, UNSUP(1850), UNSUP(2500), UNSUP(2600), UNSUP(2800), UNSUP(2850), UNSUP(3000), UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150), }; static const u16 VSIM_VSEL_table[] = { UNSUP(1000), UNSUP(1200), UNSUP(1300), 1800, 2800, 3000, 3000, 3000, }; static const u16 VDAC_VSEL_table[] = { 1200, 1300, 1800, 1800, }; static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); int mV = info->table[index]; return IS_UNSUP(mV) ? 
0 : (LDO_MV(mV) * 1000); } static int twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel; for (vsel = 0; vsel < info->table_len; vsel++) { int mV = info->table[vsel]; int uV; if (IS_UNSUP(mV)) continue; uV = LDO_MV(mV) * 1000; /* REVISIT for VAUX2, first match may not be best/lowest */ /* use the first in-range value */ if (min_uV <= uV && uV <= max_uV) return twl4030reg_write(info, VREG_DEDICATED, vsel); } return -EDOM; } static int twl4030ldo_get_voltage(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); int vsel = twl4030reg_read(info, VREG_DEDICATED); if (vsel < 0) return vsel; vsel &= info->table_len - 1; return LDO_MV(info->table[vsel]) * 1000; } static struct regulator_ops twl4030ldo_ops = { .list_voltage = twl4030ldo_list_voltage, .set_voltage = twl4030ldo_set_voltage, .get_voltage = twl4030ldo_get_voltage, .enable = twl4030reg_enable, .disable = twl4030reg_disable, .is_enabled = twl4030reg_is_enabled, .set_mode = twl4030reg_set_mode, .get_status = twl4030reg_get_status, }; /*----------------------------------------------------------------------*/ /* * Fixed voltage LDOs don't have a VSEL field to update. 
*/ static int twl4030fixed_list_voltage(struct regulator_dev *rdev, unsigned index) { struct twlreg_info *info = rdev_get_drvdata(rdev); return info->min_mV * 1000; } static int twl4030fixed_get_voltage(struct regulator_dev *rdev) { struct twlreg_info *info = rdev_get_drvdata(rdev); return info->min_mV * 1000; } static struct regulator_ops twl4030fixed_ops = { .list_voltage = twl4030fixed_list_voltage, .get_voltage = twl4030fixed_get_voltage, .enable = twl4030reg_enable, .disable = twl4030reg_disable, .is_enabled = twl4030reg_is_enabled, .set_mode = twl4030reg_set_mode, .get_status = twl4030reg_get_status, }; /*----------------------------------------------------------------------*/ #define TWL_ADJUSTABLE_LDO(label, offset, num) { \ .base = offset, \ .id = num, \ .table_len = ARRAY_SIZE(label##_VSEL_table), \ .table = label##_VSEL_table, \ .desc = { \ .name = #label, \ .id = TWL4030_REG_##label, \ .n_voltages = ARRAY_SIZE(label##_VSEL_table), \ .ops = &twl4030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } #define TWL_FIXED_LDO(label, offset, mVolts, num) { \ .base = offset, \ .id = num, \ .min_mV = mVolts, \ .desc = { \ .name = #label, \ .id = TWL4030_REG_##label, \ .n_voltages = 1, \ .ops = &twl4030fixed_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } /* * We list regulators here if systems need some level of * software control over them after boot. 
*/ static struct twlreg_info twl4030_regs[] = { TWL_ADJUSTABLE_LDO(VAUX1, 0x17, 1), TWL_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2), TWL_ADJUSTABLE_LDO(VAUX2, 0x1b, 2), TWL_ADJUSTABLE_LDO(VAUX3, 0x1f, 3), TWL_ADJUSTABLE_LDO(VAUX4, 0x23, 4), TWL_ADJUSTABLE_LDO(VMMC1, 0x27, 5), TWL_ADJUSTABLE_LDO(VMMC2, 0x2b, 6), /* TWL_ADJUSTABLE_LDO(VPLL1, 0x2f, 7), */ TWL_ADJUSTABLE_LDO(VPLL2, 0x33, 8), TWL_ADJUSTABLE_LDO(VSIM, 0x37, 9), TWL_ADJUSTABLE_LDO(VDAC, 0x3b, 10), /* TWL_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11), TWL_ADJUSTABLE_LDO(VINTANA2, 0x43, 12), TWL_ADJUSTABLE_LDO(VINTDIG, 0x47, 13), TWL_SMPS(VIO, 0x4b, 14), TWL_SMPS(VDD1, 0x55, 15), TWL_SMPS(VDD2, 0x63, 16), */ TWL_FIXED_LDO(VUSB1V5, 0x71, 1500, 17), TWL_FIXED_LDO(VUSB1V8, 0x74, 1800, 18), TWL_FIXED_LDO(VUSB3V1, 0x77, 3100, 19), /* VUSBCP is managed *only* by the USB subchip */ }; static int twl4030reg_probe(struct platform_device *pdev) { int i; struct twlreg_info *info; struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; for (i = 0, info = NULL; i < ARRAY_SIZE(twl4030_regs); i++) { if (twl4030_regs[i].desc.id != pdev->id) continue; info = twl4030_regs + i; break; } if (!info) return -ENODEV; initdata = pdev->dev.platform_data; if (!initdata) return -EINVAL; /* Constrain board-specific capabilities according to what * this driver and the chip itself can actually do. */ c = &initdata->constraints; c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS; rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "can't register %s, %ld\n", info->desc.name, PTR_ERR(rdev)); return PTR_ERR(rdev); } platform_set_drvdata(pdev, rdev); /* NOTE: many regulators support short-circuit IRQs (presentable * as REGULATOR_OVER_CURRENT notifications?) 
configured via: * - SC_CONFIG * - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4) * - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2) * - IT_CONFIG */ return 0; } static int __devexit twl4030reg_remove(struct platform_device *pdev) { regulator_unregister(platform_get_drvdata(pdev)); return 0; } MODULE_ALIAS("platform:twl4030_reg"); static struct platform_driver twl4030reg_driver = { .probe = twl4030reg_probe, .remove = __devexit_p(twl4030reg_remove), /* NOTE: short name, to work around driver model truncation of * "twl4030_regulator.12" (and friends) to "twl4030_regulator.1". */ .driver.name = "twl4030_reg", .driver.owner = THIS_MODULE, }; static int __init twl4030reg_init(void) { return platform_driver_register(&twl4030reg_driver); } subsys_initcall(twl4030reg_init); static void __exit twl4030reg_exit(void) { platform_driver_unregister(&twl4030reg_driver); } module_exit(twl4030reg_exit) MODULE_DESCRIPTION("TWL4030 regulator driver"); MODULE_LICENSE("GPL");
gpl-2.0
MasterSS/linux
drivers/scsi/imm.c
551
29300
/* imm.c -- low level driver for the IOMEGA MatchMaker * parallel port SCSI host adapter. * * (The IMM is the embedded controller in the ZIP Plus drive.) * * My unofficial company acronym list is 21 pages long: * FLA: Four letter acronym with built in facility for * future expansion to five letters. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parport.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> /* The following #define is to avoid a clash with hosts.c */ #define IMM_PROBE_SPP 0x0001 #define IMM_PROBE_PS2 0x0002 #define IMM_PROBE_ECR 0x0010 #define IMM_PROBE_EPP17 0x0100 #define IMM_PROBE_EPP19 0x0200 typedef struct { struct pardevice *dev; /* Parport device entry */ int base; /* Actual port address */ int base_hi; /* Hi Base address for ECP-ISA chipset */ int mode; /* Transfer mode */ struct scsi_cmnd *cur_cmd; /* Current queued command */ struct delayed_work imm_tq; /* Polling interrupt stuff */ unsigned long jstart; /* Jiffies at start */ unsigned failed:1; /* Failure flag */ unsigned dp:1; /* Data phase present */ unsigned rd:1; /* Read data in data phase */ unsigned wanted:1; /* Parport sharing busy flag */ wait_queue_head_t *waiting; struct Scsi_Host *host; struct list_head list; } imm_struct; static void imm_reset_pulse(unsigned int base); static int device_check(imm_struct *dev); #include "imm.h" static inline imm_struct *imm_dev(struct Scsi_Host *host) { return *(imm_struct **)&host->hostdata; } static DEFINE_SPINLOCK(arbitration_lock); static void got_it(imm_struct *dev) { dev->base = dev->dev->port->base; if (dev->cur_cmd) dev->cur_cmd->SCp.phase = 1; else wake_up(dev->waiting); } static void imm_wakeup(void *ref) { imm_struct *dev = (imm_struct *) ref; unsigned long flags; spin_lock_irqsave(&arbitration_lock, flags); 
if (dev->wanted) { parport_claim(dev->dev); got_it(dev); dev->wanted = 0; } spin_unlock_irqrestore(&arbitration_lock, flags); } static int imm_pb_claim(imm_struct *dev) { unsigned long flags; int res = 1; spin_lock_irqsave(&arbitration_lock, flags); if (parport_claim(dev->dev) == 0) { got_it(dev); res = 0; } dev->wanted = res; spin_unlock_irqrestore(&arbitration_lock, flags); return res; } static void imm_pb_dismiss(imm_struct *dev) { unsigned long flags; int wanted; spin_lock_irqsave(&arbitration_lock, flags); wanted = dev->wanted; dev->wanted = 0; spin_unlock_irqrestore(&arbitration_lock, flags); if (!wanted) parport_release(dev->dev); } static inline void imm_pb_release(imm_struct *dev) { parport_release(dev->dev); } /* This is to give the imm driver a way to modify the timings (and other * parameters) by writing to the /proc/scsi/imm/0 file. * Very simple method really... (Too simple, no error checking :( ) * Reason: Kernel hackers HATE having to unload and reload modules for * testing... 
* Also gives a method to use a script to obtain optimum timings (TODO) */ static int imm_write_info(struct Scsi_Host *host, char *buffer, int length) { imm_struct *dev = imm_dev(host); if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { dev->mode = simple_strtoul(buffer + 5, NULL, 0); return length; } printk("imm /proc: invalid variable\n"); return -EINVAL; } static int imm_show_info(struct seq_file *m, struct Scsi_Host *host) { imm_struct *dev = imm_dev(host); seq_printf(m, "Version : %s\n", IMM_VERSION); seq_printf(m, "Parport : %s\n", dev->dev->port->name); seq_printf(m, "Mode : %s\n", IMM_MODE_STRING[dev->mode]); return 0; } #if IMM_DEBUG > 0 #define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\ y, __func__, __LINE__); imm_fail_func(x,y); static inline void imm_fail_func(imm_struct *dev, int error_code) #else static inline void imm_fail(imm_struct *dev, int error_code) #endif { /* If we fail a device then we trash status / message bytes */ if (dev->cur_cmd) { dev->cur_cmd->result = error_code << 16; dev->failed = 1; } } /* * Wait for the high bit to be set. * * In principle, this could be tied to an interrupt, but the adapter * doesn't appear to be designed to support interrupts. We spin on * the 0x80 ready bit. */ static unsigned char imm_wait(imm_struct *dev) { int k; unsigned short ppb = dev->base; unsigned char r; w_ctr(ppb, 0x0c); k = IMM_SPIN_TMO; do { r = r_str(ppb); k--; udelay(1); } while (!(r & 0x80) && (k)); /* * STR register (LPT base+1) to SCSI mapping: * * STR imm imm * =================================== * 0x80 S_REQ S_REQ * 0x40 !S_BSY (????) * 0x20 !S_CD !S_CD * 0x10 !S_IO !S_IO * 0x08 (????) 
!S_BSY * * imm imm meaning * ================================== * 0xf0 0xb8 Bit mask * 0xc0 0x88 ZIP wants more data * 0xd0 0x98 ZIP wants to send more data * 0xe0 0xa8 ZIP is expecting SCSI command data * 0xf0 0xb8 end of transfer, ZIP is sending status */ w_ctr(ppb, 0x04); if (k) return (r & 0xb8); /* Counter expired - Time out occurred */ imm_fail(dev, DID_TIME_OUT); printk("imm timeout in imm_wait\n"); return 0; /* command timed out */ } static int imm_negotiate(imm_struct * tmp) { /* * The following is supposedly the IEEE 1284-1994 negotiate * sequence. I have yet to obtain a copy of the above standard * so this is a bit of a guess... * * A fair chunk of this is based on the Linux parport implementation * of IEEE 1284. * * Return 0 if data available * 1 if no data available */ unsigned short base = tmp->base; unsigned char a, mode; switch (tmp->mode) { case IMM_NIBBLE: mode = 0x00; break; case IMM_PS2: mode = 0x01; break; default: return 0; } w_ctr(base, 0x04); udelay(5); w_dtr(base, mode); udelay(100); w_ctr(base, 0x06); udelay(5); a = (r_str(base) & 0x20) ? 0 : 1; udelay(5); w_ctr(base, 0x07); udelay(5); w_ctr(base, 0x06); if (a) { printk ("IMM: IEEE1284 negotiate indicates no data available.\n"); imm_fail(tmp, DID_ERROR); } return a; } /* * Clear EPP timeout bit. 
*/ static inline void epp_reset(unsigned short ppb) { int i; i = r_str(ppb); w_str(ppb, i); w_str(ppb, i & 0xfe); } /* * Wait for empty ECP fifo (if we are in ECP fifo mode only) */ static inline void ecp_sync(imm_struct *dev) { int i, ppb_hi = dev->base_hi; if (ppb_hi == 0) return; if ((r_ecr(ppb_hi) & 0xe0) == 0x60) { /* mode 011 == ECP fifo mode */ for (i = 0; i < 100; i++) { if (r_ecr(ppb_hi) & 0x01) return; udelay(5); } printk("imm: ECP sync failed as data still present in FIFO.\n"); } } static int imm_byte_out(unsigned short base, const char *buffer, int len) { int i; w_ctr(base, 0x4); /* apparently a sane mode */ for (i = len >> 1; i; i--) { w_dtr(base, *buffer++); w_ctr(base, 0x5); /* Drop STROBE low */ w_dtr(base, *buffer++); w_ctr(base, 0x0); /* STROBE high + INIT low */ } w_ctr(base, 0x4); /* apparently a sane mode */ return 1; /* All went well - we hope! */ } static int imm_nibble_in(unsigned short base, char *buffer, int len) { unsigned char l; int i; /* * The following is based on documented timing signals */ w_ctr(base, 0x4); for (i = len; i; i--) { w_ctr(base, 0x6); l = (r_str(base) & 0xf0) >> 4; w_ctr(base, 0x5); *buffer++ = (r_str(base) & 0xf0) | l; w_ctr(base, 0x4); } return 1; /* All went well - we hope! */ } static int imm_byte_in(unsigned short base, char *buffer, int len) { int i; /* * The following is based on documented timing signals */ w_ctr(base, 0x4); for (i = len; i; i--) { w_ctr(base, 0x26); *buffer++ = r_dtr(base); w_ctr(base, 0x25); } return 1; /* All went well - we hope! 
*/ } static int imm_out(imm_struct *dev, char *buffer, int len) { unsigned short ppb = dev->base; int r = imm_wait(dev); /* * Make sure that: * a) the SCSI bus is BUSY (device still listening) * b) the device is listening */ if ((r & 0x18) != 0x08) { imm_fail(dev, DID_ERROR); printk("IMM: returned SCSI status %2x\n", r); return 0; } switch (dev->mode) { case IMM_EPP_32: case IMM_EPP_16: case IMM_EPP_8: epp_reset(ppb); w_ctr(ppb, 0x4); #ifdef CONFIG_SCSI_IZIP_EPP16 if (!(((long) buffer | len) & 0x01)) outsw(ppb + 4, buffer, len >> 1); #else if (!(((long) buffer | len) & 0x03)) outsl(ppb + 4, buffer, len >> 2); #endif else outsb(ppb + 4, buffer, len); w_ctr(ppb, 0xc); r = !(r_str(ppb) & 0x01); w_ctr(ppb, 0xc); ecp_sync(dev); break; case IMM_NIBBLE: case IMM_PS2: /* 8 bit output, with a loop */ r = imm_byte_out(ppb, buffer, len); break; default: printk("IMM: bug in imm_out()\n"); r = 0; } return r; } static int imm_in(imm_struct *dev, char *buffer, int len) { unsigned short ppb = dev->base; int r = imm_wait(dev); /* * Make sure that: * a) the SCSI bus is BUSY (device still listening) * b) the device is sending data */ if ((r & 0x18) != 0x18) { imm_fail(dev, DID_ERROR); return 0; } switch (dev->mode) { case IMM_NIBBLE: /* 4 bit input, with a loop */ r = imm_nibble_in(ppb, buffer, len); w_ctr(ppb, 0xc); break; case IMM_PS2: /* 8 bit input, with a loop */ r = imm_byte_in(ppb, buffer, len); w_ctr(ppb, 0xc); break; case IMM_EPP_32: case IMM_EPP_16: case IMM_EPP_8: epp_reset(ppb); w_ctr(ppb, 0x24); #ifdef CONFIG_SCSI_IZIP_EPP16 if (!(((long) buffer | len) & 0x01)) insw(ppb + 4, buffer, len >> 1); #else if (!(((long) buffer | len) & 0x03)) insl(ppb + 4, buffer, len >> 2); #endif else insb(ppb + 4, buffer, len); w_ctr(ppb, 0x2c); r = !(r_str(ppb) & 0x01); w_ctr(ppb, 0x2c); ecp_sync(dev); break; default: printk("IMM: bug in imm_ins()\n"); r = 0; break; } return r; } static int imm_cpp(unsigned short ppb, unsigned char b) { /* * Comments on udelay values refer to the * Command 
Packet Protocol (CPP) timing diagram. */ unsigned char s1, s2, s3; w_ctr(ppb, 0x0c); udelay(2); /* 1 usec - infinite */ w_dtr(ppb, 0xaa); udelay(10); /* 7 usec - infinite */ w_dtr(ppb, 0x55); udelay(10); /* 7 usec - infinite */ w_dtr(ppb, 0x00); udelay(10); /* 7 usec - infinite */ w_dtr(ppb, 0xff); udelay(10); /* 7 usec - infinite */ s1 = r_str(ppb) & 0xb8; w_dtr(ppb, 0x87); udelay(10); /* 7 usec - infinite */ s2 = r_str(ppb) & 0xb8; w_dtr(ppb, 0x78); udelay(10); /* 7 usec - infinite */ s3 = r_str(ppb) & 0x38; /* * Values for b are: * 0000 00aa Assign address aa to current device * 0010 00aa Select device aa in EPP Winbond mode * 0010 10aa Select device aa in EPP mode * 0011 xxxx Deselect all devices * 0110 00aa Test device aa * 1101 00aa Select device aa in ECP mode * 1110 00aa Select device aa in Compatible mode */ w_dtr(ppb, b); udelay(2); /* 1 usec - infinite */ w_ctr(ppb, 0x0c); udelay(10); /* 7 usec - infinite */ w_ctr(ppb, 0x0d); udelay(2); /* 1 usec - infinite */ w_ctr(ppb, 0x0c); udelay(10); /* 7 usec - infinite */ w_dtr(ppb, 0xff); udelay(10); /* 7 usec - infinite */ /* * The following table is electrical pin values. 
* (BSY is inverted at the CTR register) * * BSY ACK POut SEL Fault * S1 0 X 1 1 1 * S2 1 X 0 1 1 * S3 L X 1 1 S * * L => Last device in chain * S => Selected * * Observered values for S1,S2,S3 are: * Disconnect => f8/58/78 * Connect => f8/58/70 */ if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x30)) return 1; /* Connected */ if ((s1 == 0xb8) && (s2 == 0x18) && (s3 == 0x38)) return 0; /* Disconnected */ return -1; /* No device present */ } static inline int imm_connect(imm_struct *dev, int flag) { unsigned short ppb = dev->base; imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */ imm_cpp(ppb, 0x30); /* Disconnect all devices */ if ((dev->mode == IMM_EPP_8) || (dev->mode == IMM_EPP_16) || (dev->mode == IMM_EPP_32)) return imm_cpp(ppb, 0x28); /* Select device 0 in EPP mode */ return imm_cpp(ppb, 0xe0); /* Select device 0 in compatible mode */ } static void imm_disconnect(imm_struct *dev) { imm_cpp(dev->base, 0x30); /* Disconnect all devices */ } static int imm_select(imm_struct *dev, int target) { int k; unsigned short ppb = dev->base; /* * Firstly we want to make sure there is nothing * holding onto the SCSI bus. */ w_ctr(ppb, 0xc); k = IMM_SELECT_TMO; do { k--; } while ((r_str(ppb) & 0x08) && (k)); if (!k) return 0; /* * Now assert the SCSI ID (HOST and TARGET) on the data bus */ w_ctr(ppb, 0x4); w_dtr(ppb, 0x80 | (1 << target)); udelay(1); /* * Deassert SELIN first followed by STROBE */ w_ctr(ppb, 0xc); w_ctr(ppb, 0xd); /* * ACK should drop low while SELIN is deasserted. * FAULT should drop low when the SCSI device latches the bus. */ k = IMM_SELECT_TMO; do { k--; } while (!(r_str(ppb) & 0x08) && (k)); /* * Place the interface back into a sane state (status mode) */ w_ctr(ppb, 0xc); return (k) ? 
1 : 0; } static int imm_init(imm_struct *dev) { if (imm_connect(dev, 0) != 1) return -EIO; imm_reset_pulse(dev->base); mdelay(1); /* Delay to allow devices to settle */ imm_disconnect(dev); mdelay(1); /* Another delay to allow devices to settle */ return device_check(dev); } static inline int imm_send_command(struct scsi_cmnd *cmd) { imm_struct *dev = imm_dev(cmd->device->host); int k; /* NOTE: IMM uses byte pairs */ for (k = 0; k < cmd->cmd_len; k += 2) if (!imm_out(dev, &cmd->cmnd[k], 2)) return 0; return 1; } /* * The bulk flag enables some optimisations in the data transfer loops, * it should be true for any command that transfers data in integral * numbers of sectors. * * The driver appears to remain stable if we speed up the parallel port * i/o in this function, but not elsewhere. */ static int imm_completion(struct scsi_cmnd *cmd) { /* Return codes: * -1 Error * 0 Told to schedule * 1 Finished data transfer */ imm_struct *dev = imm_dev(cmd->device->host); unsigned short ppb = dev->base; unsigned long start_jiffies = jiffies; unsigned char r, v; int fast, bulk, status; v = cmd->cmnd[0]; bulk = ((v == READ_6) || (v == READ_10) || (v == WRITE_6) || (v == WRITE_10)); /* * We only get here if the drive is ready to comunicate, * hence no need for a full imm_wait. */ w_ctr(ppb, 0x0c); r = (r_str(ppb) & 0xb8); /* * while (device is not ready to send status byte) * loop; */ while (r != (unsigned char) 0xb8) { /* * If we have been running for more than a full timer tick * then take a rest. */ if (time_after(jiffies, start_jiffies + 1)) return 0; /* * FAIL if: * a) Drive status is screwy (!ready && !present) * b) Drive is requesting/sending more data than expected */ if (((r & 0x88) != 0x88) || (cmd->SCp.this_residual <= 0)) { imm_fail(dev, DID_ERROR); return -1; /* ERROR_RETURN */ } /* determine if we should use burst I/O */ if (dev->rd == 0) { fast = (bulk && (cmd->SCp.this_residual >= IMM_BURST_SIZE)) ? 
IMM_BURST_SIZE : 2; status = imm_out(dev, cmd->SCp.ptr, fast); } else { fast = (bulk && (cmd->SCp.this_residual >= IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 1; status = imm_in(dev, cmd->SCp.ptr, fast); } cmd->SCp.ptr += fast; cmd->SCp.this_residual -= fast; if (!status) { imm_fail(dev, DID_BUS_BUSY); return -1; /* ERROR_RETURN */ } if (cmd->SCp.buffer && !cmd->SCp.this_residual) { /* if scatter/gather, advance to the next segment */ if (cmd->SCp.buffers_residual--) { cmd->SCp.buffer++; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); /* * Make sure that we transfer even number of bytes * otherwise it makes imm_byte_out() messy. */ if (cmd->SCp.this_residual & 0x01) cmd->SCp.this_residual++; } } /* Now check to see if the drive is ready to comunicate */ w_ctr(ppb, 0x0c); r = (r_str(ppb) & 0xb8); /* If not, drop back down to the scheduler and wait a timer tick */ if (!(r & 0x80)) return 0; } return 1; /* FINISH_RETURN */ } /* * Since the IMM itself doesn't generate interrupts, we use * the scheduler's task queue to generate a stream of call-backs and * complete the request when the drive is ready. */ static void imm_interrupt(struct work_struct *work) { imm_struct *dev = container_of(work, imm_struct, imm_tq.work); struct scsi_cmnd *cmd = dev->cur_cmd; struct Scsi_Host *host = cmd->device->host; unsigned long flags; if (imm_engine(dev, cmd)) { schedule_delayed_work(&dev->imm_tq, 1); return; } /* Command must of completed hence it is safe to let go... 
*/ #if IMM_DEBUG > 0 switch ((cmd->result >> 16) & 0xff) { case DID_OK: break; case DID_NO_CONNECT: printk("imm: no device at SCSI ID %i\n", cmd->device->id); break; case DID_BUS_BUSY: printk("imm: BUS BUSY - EPP timeout detected\n"); break; case DID_TIME_OUT: printk("imm: unknown timeout\n"); break; case DID_ABORT: printk("imm: told to abort\n"); break; case DID_PARITY: printk("imm: parity error (???)\n"); break; case DID_ERROR: printk("imm: internal driver error\n"); break; case DID_RESET: printk("imm: told to reset device\n"); break; case DID_BAD_INTR: printk("imm: bad interrupt (???)\n"); break; default: printk("imm: bad return code (%02x)\n", (cmd->result >> 16) & 0xff); } #endif if (cmd->SCp.phase > 1) imm_disconnect(dev); imm_pb_dismiss(dev); spin_lock_irqsave(host->host_lock, flags); dev->cur_cmd = NULL; cmd->scsi_done(cmd); spin_unlock_irqrestore(host->host_lock, flags); return; } static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) { unsigned short ppb = dev->base; unsigned char l = 0, h = 0; int retv, x; /* First check for any errors that may have occurred * Here we check for internal errors */ if (dev->failed) return 0; switch (cmd->SCp.phase) { case 0: /* Phase 0 - Waiting for parport */ if (time_after(jiffies, dev->jstart + HZ)) { /* * We waited more than a second * for parport to call us */ imm_fail(dev, DID_BUS_BUSY); return 0; } return 1; /* wait until imm_wakeup claims parport */ /* Phase 1 - Connected */ case 1: imm_connect(dev, CONNECT_EPP_MAYBE); cmd->SCp.phase++; /* Phase 2 - We are now talking to the scsi bus */ case 2: if (!imm_select(dev, scmd_id(cmd))) { imm_fail(dev, DID_NO_CONNECT); return 0; } cmd->SCp.phase++; /* Phase 3 - Ready to accept a command */ case 3: w_ctr(ppb, 0x0c); if (!(r_str(ppb) & 0x80)) return 1; if (!imm_send_command(cmd)) return 0; cmd->SCp.phase++; /* Phase 4 - Setup scatter/gather buffers */ case 4: if (scsi_bufflen(cmd)) { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.this_residual = 
cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); } else { cmd->SCp.buffer = NULL; cmd->SCp.this_residual = 0; cmd->SCp.ptr = NULL; } cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.phase++; if (cmd->SCp.this_residual & 0x01) cmd->SCp.this_residual++; /* Phase 5 - Pre-Data transfer stage */ case 5: /* Spin lock for BUSY */ w_ctr(ppb, 0x0c); if (!(r_str(ppb) & 0x80)) return 1; /* Require negotiation for read requests */ x = (r_str(ppb) & 0xb8); dev->rd = (x & 0x10) ? 1 : 0; dev->dp = (x & 0x20) ? 0 : 1; if ((dev->dp) && (dev->rd)) if (imm_negotiate(dev)) return 0; cmd->SCp.phase++; /* Phase 6 - Data transfer stage */ case 6: /* Spin lock for BUSY */ w_ctr(ppb, 0x0c); if (!(r_str(ppb) & 0x80)) return 1; if (dev->dp) { retv = imm_completion(cmd); if (retv == -1) return 0; if (retv == 0) return 1; } cmd->SCp.phase++; /* Phase 7 - Post data transfer stage */ case 7: if ((dev->dp) && (dev->rd)) { if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { w_ctr(ppb, 0x4); w_ctr(ppb, 0xc); w_ctr(ppb, 0xe); w_ctr(ppb, 0x4); } } cmd->SCp.phase++; /* Phase 8 - Read status/message */ case 8: /* Check for data overrun */ if (imm_wait(dev) != (unsigned char) 0xb8) { imm_fail(dev, DID_ERROR); return 0; } if (imm_negotiate(dev)) return 0; if (imm_in(dev, &l, 1)) { /* read status byte */ /* Check for optional message byte */ if (imm_wait(dev) == (unsigned char) 0xb8) imm_in(dev, &h, 1); cmd->result = (DID_OK << 16) + (l & STATUS_MASK); } if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { w_ctr(ppb, 0x4); w_ctr(ppb, 0xc); w_ctr(ppb, 0xe); w_ctr(ppb, 0x4); } return 0; /* Finished */ break; default: printk("imm: Invalid scsi phase\n"); } return 0; } static int imm_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { imm_struct *dev = imm_dev(cmd->device->host); if (dev->cur_cmd) { printk("IMM: bug in imm_queuecommand\n"); return 0; } dev->failed = 0; dev->jstart = jiffies; dev->cur_cmd = cmd; cmd->scsi_done = done; 
cmd->result = DID_ERROR << 16; /* default return code */ cmd->SCp.phase = 0; /* bus free */ schedule_delayed_work(&dev->imm_tq, 0); imm_pb_claim(dev); return 0; } static DEF_SCSI_QCMD(imm_queuecommand) /* * Apparently the disk->capacity attribute is off by 1 sector * for all disk drives. We add the one here, but it should really * be done in sd.c. Even if it gets fixed there, this will still * work. */ static int imm_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int ip[]) { ip[0] = 0x40; ip[1] = 0x20; ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); if (ip[2] > 1024) { ip[0] = 0xff; ip[1] = 0x3f; ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]); } return 0; } static int imm_abort(struct scsi_cmnd *cmd) { imm_struct *dev = imm_dev(cmd->device->host); /* * There is no method for aborting commands since Iomega * have tied the SCSI_MESSAGE line high in the interface */ switch (cmd->SCp.phase) { case 0: /* Do not have access to parport */ case 1: /* Have not connected to interface */ dev->cur_cmd = NULL; /* Forget the problem */ return SUCCESS; break; default: /* SCSI command sent, can not abort */ return FAILED; break; } } static void imm_reset_pulse(unsigned int base) { w_ctr(base, 0x04); w_dtr(base, 0x40); udelay(1); w_ctr(base, 0x0c); w_ctr(base, 0x0d); udelay(50); w_ctr(base, 0x0c); w_ctr(base, 0x04); } static int imm_reset(struct scsi_cmnd *cmd) { imm_struct *dev = imm_dev(cmd->device->host); if (cmd->SCp.phase) imm_disconnect(dev); dev->cur_cmd = NULL; /* Forget the problem */ imm_connect(dev, CONNECT_NORMAL); imm_reset_pulse(dev->base); mdelay(1); /* device settle delay */ imm_disconnect(dev); mdelay(1); /* device settle delay */ return SUCCESS; } static int device_check(imm_struct *dev) { /* This routine looks for a device and then attempts to use EPP to send a command. If all goes as planned then EPP is available. 
*/ static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; int loop, old_mode, status, k, ppb = dev->base; unsigned char l; old_mode = dev->mode; for (loop = 0; loop < 8; loop++) { /* Attempt to use EPP for Test Unit Ready */ if ((ppb & 0x0007) == 0x0000) dev->mode = IMM_EPP_32; second_pass: imm_connect(dev, CONNECT_EPP_MAYBE); /* Select SCSI device */ if (!imm_select(dev, loop)) { imm_disconnect(dev); continue; } printk("imm: Found device at ID %i, Attempting to use %s\n", loop, IMM_MODE_STRING[dev->mode]); /* Send SCSI command */ status = 1; w_ctr(ppb, 0x0c); for (l = 0; (l < 3) && (status); l++) status = imm_out(dev, &cmd[l << 1], 2); if (!status) { imm_disconnect(dev); imm_connect(dev, CONNECT_EPP_MAYBE); imm_reset_pulse(dev->base); udelay(1000); imm_disconnect(dev); udelay(1000); if (dev->mode == IMM_EPP_32) { dev->mode = old_mode; goto second_pass; } printk("imm: Unable to establish communication\n"); return -EIO; } w_ctr(ppb, 0x0c); k = 1000000; /* 1 Second */ do { l = r_str(ppb); k--; udelay(1); } while (!(l & 0x80) && (k)); l &= 0xb8; if (l != 0xb8) { imm_disconnect(dev); imm_connect(dev, CONNECT_EPP_MAYBE); imm_reset_pulse(dev->base); udelay(1000); imm_disconnect(dev); udelay(1000); if (dev->mode == IMM_EPP_32) { dev->mode = old_mode; goto second_pass; } printk ("imm: Unable to establish communication\n"); return -EIO; } imm_disconnect(dev); printk ("imm: Communication established at 0x%x with ID %i using %s\n", ppb, loop, IMM_MODE_STRING[dev->mode]); imm_connect(dev, CONNECT_EPP_MAYBE); imm_reset_pulse(dev->base); udelay(1000); imm_disconnect(dev); udelay(1000); return 0; } printk("imm: No devices found\n"); return -ENODEV; } /* * imm cannot deal with highmem, so this causes all IO pages for this host * to reside in low memory (hence mapped) */ static int imm_adjust_queue(struct scsi_device *device) { blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH); return 0; } static struct scsi_host_template imm_template = { .module = THIS_MODULE, 
.proc_name = "imm", .show_info = imm_show_info, .write_info = imm_write_info, .name = "Iomega VPI2 (imm) interface", .queuecommand = imm_queuecommand, .eh_abort_handler = imm_abort, .eh_bus_reset_handler = imm_reset, .eh_host_reset_handler = imm_reset, .bios_param = imm_biosparam, .this_id = 7, .sg_tablesize = SG_ALL, .use_clustering = ENABLE_CLUSTERING, .can_queue = 1, .slave_alloc = imm_adjust_queue, }; /*************************************************************************** * Parallel port probing routines * ***************************************************************************/ static LIST_HEAD(imm_hosts); static int __imm_attach(struct parport *pb) { struct Scsi_Host *host; imm_struct *dev; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting); DEFINE_WAIT(wait); int ports; int modes, ppb; int err = -ENOMEM; init_waitqueue_head(&waiting); dev = kzalloc(sizeof(imm_struct), GFP_KERNEL); if (!dev) return -ENOMEM; dev->base = -1; dev->mode = IMM_AUTODETECT; INIT_LIST_HEAD(&dev->list); dev->dev = parport_register_device(pb, "imm", NULL, imm_wakeup, NULL, 0, dev); if (!dev->dev) goto out; /* Claim the bus so it remembers what we do to the control * registers. [ CTR and ECP ] */ err = -EBUSY; dev->waiting = &waiting; prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE); if (imm_pb_claim(dev)) schedule_timeout(3 * HZ); if (dev->wanted) { printk(KERN_ERR "imm%d: failed to claim parport because " "a pardevice is owning the port for too long " "time!\n", pb->number); imm_pb_dismiss(dev); dev->waiting = NULL; finish_wait(&waiting, &wait); goto out1; } dev->waiting = NULL; finish_wait(&waiting, &wait); ppb = dev->base = dev->dev->port->base; dev->base_hi = dev->dev->port->base_hi; w_ctr(ppb, 0x0c); modes = dev->dev->port->modes; /* Mode detection works up the chain of speed * This avoids a nasty if-then-else-if-... 
tree */ dev->mode = IMM_NIBBLE; if (modes & PARPORT_MODE_TRISTATE) dev->mode = IMM_PS2; /* Done configuration */ err = imm_init(dev); imm_pb_release(dev); if (err) goto out1; /* now the glue ... */ if (dev->mode == IMM_NIBBLE || dev->mode == IMM_PS2) ports = 3; else ports = 8; INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); err = -ENOMEM; host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); if (!host) goto out1; host->io_port = pb->base; host->n_io_port = ports; host->dma_channel = -1; host->unique_id = pb->number; *(imm_struct **)&host->hostdata = dev; dev->host = host; list_add_tail(&dev->list, &imm_hosts); err = scsi_add_host(host, NULL); if (err) goto out2; scsi_scan_host(host); return 0; out2: list_del_init(&dev->list); scsi_host_put(host); out1: parport_unregister_device(dev->dev); out: kfree(dev); return err; } static void imm_attach(struct parport *pb) { __imm_attach(pb); } static void imm_detach(struct parport *pb) { imm_struct *dev; list_for_each_entry(dev, &imm_hosts, list) { if (dev->dev->port == pb) { list_del_init(&dev->list); scsi_remove_host(dev->host); scsi_host_put(dev->host); parport_unregister_device(dev->dev); kfree(dev); break; } } } static struct parport_driver imm_driver = { .name = "imm", .attach = imm_attach, .detach = imm_detach, }; static int __init imm_driver_init(void) { printk("imm: Version %s\n", IMM_VERSION); return parport_register_driver(&imm_driver); } static void __exit imm_driver_exit(void) { parport_unregister_driver(&imm_driver); } module_init(imm_driver_init); module_exit(imm_driver_exit); MODULE_LICENSE("GPL");
gpl-2.0
Rover-Yu/ali_kernel
net/bridge/netfilter/ebtable_filter.c
551
3153
/* * ebtable_filter * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/module.h> #define FILTER_VALID_HOOKS ((1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | \ (1 << NF_BR_LOCAL_OUT)) static struct ebt_entries initial_chains[] = { { .name = "INPUT", .policy = EBT_ACCEPT, }, { .name = "FORWARD", .policy = EBT_ACCEPT, }, { .name = "OUTPUT", .policy = EBT_ACCEPT, }, }; static struct ebt_replace_kernel initial_table = { .name = "filter", .valid_hooks = FILTER_VALID_HOOKS, .entries_size = 3 * sizeof(struct ebt_entries), .hook_entry = { [NF_BR_LOCAL_IN] = &initial_chains[0], [NF_BR_FORWARD] = &initial_chains[1], [NF_BR_LOCAL_OUT] = &initial_chains[2], }, .entries = (char *)initial_chains, }; static int check(const struct ebt_table_info *info, unsigned int valid_hooks) { if (valid_hooks & ~FILTER_VALID_HOOKS) return -EINVAL; return 0; } static const struct ebt_table frame_filter = { .name = "filter", .table = &initial_table, .valid_hooks = FILTER_VALID_HOOKS, .check = check, .me = THIS_MODULE, }; static unsigned int ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter); } static unsigned int ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter); } static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { { .hook = ebt_in_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_IN, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { .hook = ebt_in_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_FORWARD, .priority = NF_BR_PRI_FILTER_BRIDGED, }, { .hook = ebt_out_hook, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority 
= NF_BR_PRI_FILTER_OTHER, }, }; static int __net_init frame_filter_net_init(struct net *net) { net->xt.frame_filter = ebt_register_table(net, &frame_filter); if (IS_ERR(net->xt.frame_filter)) return PTR_ERR(net->xt.frame_filter); return 0; } static void __net_exit frame_filter_net_exit(struct net *net) { ebt_unregister_table(net->xt.frame_filter); } static struct pernet_operations frame_filter_net_ops = { .init = frame_filter_net_init, .exit = frame_filter_net_exit, }; static int __init ebtable_filter_init(void) { int ret; ret = register_pernet_subsys(&frame_filter_net_ops); if (ret < 0) return ret; ret = nf_register_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); if (ret < 0) unregister_pernet_subsys(&frame_filter_net_ops); return ret; } static void __exit ebtable_filter_fini(void) { nf_unregister_hooks(ebt_ops_filter, ARRAY_SIZE(ebt_ops_filter)); unregister_pernet_subsys(&frame_filter_net_ops); } module_init(ebtable_filter_init); module_exit(ebtable_filter_fini); MODULE_LICENSE("GPL");
gpl-2.0
andr00ib/e730-e739_cm10.1_kernel
drivers/staging/rtl8192e/ieee80211/rtl819x_HTProc.c
807
58974
//As this function is mainly ported from Windows driver, so leave the name little changed. If any confusion caused, tell me. Created by WB. 2008.05.08 #include "ieee80211.h" #include "rtl819x_HT.h" u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; u16 MCS_DATA_RATE[2][2][77] = { { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289, 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540, 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 
630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz }; static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf}; static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70}; static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e}; //static u8 NETGEAR834Bv2_BROADCOM[3] = {0x00, 0x1b, 0x2f}; static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; //cosa 03202008 static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf}; static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc}; static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e}; static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02}; static u8 DLINK_ATHEROS[3] = {0x00, 0x1c, 0xf0}; static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94}; static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4}; // 2008/04/01 MH For Cisco G mode RX TP We need to change FW duration. Should we put the // code in other place?? //static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96}; /******************************************************************************************************************** *function: This function update default settings in pHTInfo structure * input: PRT_HIGH_THROUGHPUT pHTInfo * output: none * return: none * notice: These value need be modified if any changes. 
* *****************************************************************************************************************/ void HTUpdateDefaultSetting(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; //const typeof( ((struct ieee80211_device *)0)->pHTInfo ) *__mptr = &pHTInfo; //printk("pHTinfo:%p, &pHTinfo:%p, mptr:%p, offsetof:%x\n", pHTInfo, &pHTInfo, __mptr, offsetof(struct ieee80211_device, pHTInfo)); //printk("===>ieee:%p,\n", ieee); // ShortGI support pHTInfo->bRegShortGI20MHz= 1; pHTInfo->bRegShortGI40MHz= 1; // 40MHz channel support pHTInfo->bRegBW40MHz = 1; // CCK rate support in 40MHz channel if(pHTInfo->bRegBW40MHz) pHTInfo->bRegSuppCCK = 1; else pHTInfo->bRegSuppCCK = true; // AMSDU related pHTInfo->nAMSDU_MaxSize = 7935UL; pHTInfo->bAMSDU_Support = 0; // AMPDU related pHTInfo->bAMPDUEnable = 1; pHTInfo->AMPDU_Factor = 2; //// 0: 2n13(8K), 1:2n14(16K), 2:2n15(32K), 3:2n16(64k) pHTInfo->MPDU_Density = 0;// 0: No restriction, 1: 1/8usec, 2: 1/4usec, 3: 1/2usec, 4: 1usec, 5: 2usec, 6: 4usec, 7:8usec // MIMO Power Save pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.) if(pHTInfo->SelfMimoPs == 2) pHTInfo->SelfMimoPs = 3; // 8190 only. Assign rate operation mode to firmware ieee->bTxDisableRateFallBack = 0; ieee->bTxUseDriverAssingedRate = 0; #ifdef TO_DO_LIST // 8190 only. 
Assign duration operation mode to firmware pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur; #endif // 8190 only, Realtek proprietary aggregation mode // Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others pHTInfo->bRegRT2RTAggregation = 1;//0: Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others // For Rx Reorder Control pHTInfo->bRegRxReorderEnable = 1; pHTInfo->RxReorderWinSize = 64; pHTInfo->RxReorderPendingTime = 30; #ifdef USB_TX_DRIVER_AGGREGATION_ENABLE pHTInfo->UsbTxAggrNum = 4; #endif #ifdef USB_RX_AGGREGATION_SUPPORT pHTInfo->UsbRxFwAggrEn = 1; pHTInfo->UsbRxFwAggrPageNum = 24; pHTInfo->UsbRxFwAggrPacketNum = 8; pHTInfo->UsbRxFwAggrTimeout = 16; ////usb rx FW aggregation timeout threshold.It's in units of 64us #endif } /******************************************************************************************************************** *function: This function print out each field on HT capability IE mainly from (Beacon/ProbeRsp/AssocReq) * input: u8* CapIE //Capability IE to be printed out * u8* TitleString //mainly print out caller function * output: none * return: none * notice: Driver should not print out this message by default. * *****************************************************************************************************************/ void HTDebugHTCapability(u8* CapIE, u8* TitleString ) { static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily PHT_CAPABILITY_ELE pCapELE; if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) { //EWC IE IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__); pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[4]); }else pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[0]); IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. 
Called by %s\n", TitleString ); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth)?"20MHz": "20/40MHz"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize)?"3839": "7935"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\ pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]); return; } /******************************************************************************************************************** *function: This function print out each field on HT Information IE mainly from (Beacon/ProbeRsp) * input: u8* InfoIE //Capability IE to be printed out * u8* TitleString //mainly print out caller function * output: none * return: none * notice: Driver should not print out this message by default. 
* *****************************************************************************************************************/ void HTDebugHTInfo(u8* InfoIE, u8* TitleString) { static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily PHT_INFORMATION_ELE pHTInfoEle; if(!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) { // Not EWC IE IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__); pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]); }else pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]); IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel ="); switch(pHTInfoEle->ExtChlOffset) { case 0: IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n"); break; case 1: IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n"); break; case 2: IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. 
Eooro!!!\n"); break; case 3: IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n"); break; } IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = "); switch(pHTInfoEle->OptMode) { case 0: IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n"); break; case 1: IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n"); break; case 2: IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n"); break; case 3: IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n"); break; } IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\ pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]); return; } /* * Return: true if station in half n mode and AP supports 40 bw */ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee) { bool retValue = false; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode retValue = false; else if(pHTInfo->bRegBW40MHz == false) // station supports 40 bw retValue = false; else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode retValue = false; else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw retValue = true; else retValue = false; return retValue; } bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz) { bool retValue = false; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode retValue = false; else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode retValue = false; else if(is40MHz) // ap support 40 bw { if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // ap support 40 bw short GI retValue = true; else retValue = false; } else { 
if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // ap support 40 bw short GI retValue = true; else retValue = false; } return retValue; } u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate) { u8 is40MHz; u8 isShortGI; is40MHz = (IsHTHalfNmode40Bandwidth(ieee))?1:0; isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz))? 1:0; return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)]; } u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u8 is40MHz = (pHTInfo->bCurBW40MHz)?1:0; u8 isShortGI = (pHTInfo->bCurBW40MHz)? ((pHTInfo->bCurShortGI40MHz)?1:0): ((pHTInfo->bCurShortGI20MHz)?1:0); return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)]; } /******************************************************************************************************************** *function: This function returns current datarate. * input: struct ieee80211_device* ieee * u8 nDataRate * output: none * return: tx rate * notice: quite unsure about how to use this function //wb * *****************************************************************************************************************/ u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate) { //PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c}; u8 is40MHz = 0; u8 isShortGI = 0; if(nDataRate < 12) { return CCKOFDMRate[nDataRate]; } else { if (nDataRate >= 0x10 && nDataRate <= 0x1f)//if(nDataRate > 11 && nDataRate < 28 ) { is40MHz = 0; isShortGI = 0; // nDataRate = nDataRate - 12; } else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44) { is40MHz = 1; isShortGI = 0; //nDataRate = nDataRate - 28; } else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60) { is40MHz = 0; isShortGI = 1; //nDataRate = nDataRate - 44; } else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76) { is40MHz = 1; isShortGI = 1; //nDataRate = nDataRate - 60; } 
return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf]; } } bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee) { bool retValue = false; struct ieee80211_network* net = &ieee->current_network; #if 0 if(ieee->bHalfNMode == false) retValue = false; else #endif if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) || (memcmp(net->bssid, PCI_RALINK, 3)==0) || (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) || (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) || (net->ralink_cap_exist)) retValue = true; else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) || (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)|| (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)|| //(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) || (net->broadcom_cap_exist)) retValue = true; else if(net->bssht.bdRT2RTAggregation) retValue = true; else retValue = false; return retValue; } /******************************************************************************************************************** *function: This function returns peer IOT. 
* input: struct ieee80211_device* ieee * output: none * return: * notice: * *****************************************************************************************************************/ void HTIOTPeerDetermine(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; struct ieee80211_network* net = &ieee->current_network; if(net->bssht.bdRT2RTAggregation) pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK; else if(net->broadcom_cap_exist){ pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; } else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) || (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)|| (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)){//|| //(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ){ pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; } else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) || (memcmp(net->bssid, PCI_RALINK, 3)==0) || (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) || (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) || net->ralink_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_RALINK; else if((net->atheros_cap_exist )|| (memcmp(net->bssid, DLINK_ATHEROS, 3) == 0)) pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS; else if(memcmp(net->bssid, CISCO_BROADCOM, 3)==0) pHTInfo->IOTPeer = HT_IOT_PEER_CISCO; else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) || net->marvell_cap_exist){ pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL; } else pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer); } /******************************************************************************************************************** *function: Check whether driver should declare received rate up to MCS13 only since some chipset is not good * at receiving MCS14~15 frame from some AP. 
* input: struct ieee80211_device* ieee * u8 * PeerMacAddr * output: none * return: return 1 if driver should declare MCS13 only(otherwise return 0) * *****************************************************************************************************************/ u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr) { u8 ret = 0; #if 0 // Apply for 819u only #if (HAL_CODE_BASE==RTL8192 && DEV_BUS_TYPE==USB_INTERFACE) if((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) || (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) ) { ret = 1; } if(pHTInfo->bCurrentRT2RTAggregation) { // The parameter of pHTInfo->bCurrentRT2RTAggregation must be decided previously ret = 1; } #endif #endif return ret; } u8 HTIOTActIsForcedCTS2Self(struct ieee80211_device *ieee, struct ieee80211_network *network) { u8 retValue = 0; //if(network->marvell_cap_exist) if(ieee->pHTInfo->IOTPeer == HT_IOT_PEER_MARVELL) { retValue = 1; } return retValue; } /** * Function: HTIOTActIsDisableMCS15 * * Overview: Check whether driver should declare capability of receving MCS15 * * Input: * PADAPTER Adapter, * * Output: None * Return: true if driver should disable MCS15 * 2008.04.15 Emily */ bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee) { bool retValue = false; #ifdef TODO // Apply for 819u only #if (HAL_CODE_BASE==RTL8192) #if (DEV_BUS_TYPE == USB_INTERFACE) // Alway disable MCS15 by Jerry Chang's request.by Emily, 2008.04.15 retValue = true; #elif (DEV_BUS_TYPE == PCI_INTERFACE) // Enable MCS15 if the peer is Cisco AP. 
by Emily, 2008.05.12 // if(pBssDesc->bCiscoCapExist) // retValue = false; // else retValue = false; #endif #endif #endif // Jerry Chang suggest that 8190 1x2 does not need to disable MCS15 return retValue; } /** * Function: HTIOTActIsDisableMCSTwoSpatialStream * * Overview: Check whether driver should declare capability of receving All 2 ss packets * * Input: * PADAPTER Adapter, * * Output: None * Return: true if driver should disable all two spatial stream packet * 2008.04.21 Emily */ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *PeerMacAddr) { bool retValue = false; #ifdef TODO // Apply for 819u only //#if (HAL_CODE_BASE==RTL8192) //This rule only apply to Belkin(Ralink) AP if(IS_UNDER_11N_AES_MODE(Adapter)) { if((PlatformCompareMemory(PeerMacAddr, BELKINF5D8233V1_RALINK, 3)==0) || (PlatformCompareMemory(PeerMacAddr, PCI_RALINK, 3)==0) || (PlatformCompareMemory(PeerMacAddr, EDIMAX_RALINK, 3)==0)) { //Set True to disable this function. Disable by default, Emily, 2008.04.23 retValue = false; } } //#endif #endif return retValue; } /******************************************************************************************************************** *function: Check whether driver should disable EDCA turbo mode * input: struct ieee80211_device* ieee * u8* PeerMacAddr * output: none * return: return 1 if driver should disable EDCA turbo mode(otherwise return 0) * *****************************************************************************************************************/ u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr) { u8 retValue = false; // default enable EDCA Turbo mode. // Set specific EDCA parameter for different AP in DM handler. 
return retValue; #if 0 if((memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0)|| (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)|| (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)|| (memcmp(PeerMacAddr, NETGEAR834Bv2_BROADCOM, 3)==0)) { retValue = 1; //Linksys disable EDCA turbo mode } return retValue; #endif } /******************************************************************************************************************** *function: Check whether we need to use OFDM to sned MGNT frame for broadcom AP * input: struct ieee80211_network *network //current network we live * output: none * return: return 1 if true * *****************************************************************************************************************/ u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network) { u8 retValue = 0; // 2008/01/25 MH Judeg if we need to use OFDM to sned MGNT frame for broadcom AP. // 2008/01/28 MH We must prevent that we select null bssid to link. if(network->broadcom_cap_exist) { retValue = 1; } return retValue; } u8 HTIOTActIsCCDFsync(u8* PeerMacAddr) { u8 retValue = 0; if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) || (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) || (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ==0)) { retValue = 1; } return retValue; } // // Send null data for to tell AP that we are awake. 
//
// Return true when the peer requires the null-data power-saving IOT quirk
// (seen on Broadcom APs); otherwise false.
bool HTIOTActIsNullDataPowerSaving(struct ieee80211_device* ieee,struct ieee80211_network *network)
{
	bool	retValue = false;
	PRT_HIGH_THROUGHPUT	pHTInfo = ieee->pHTInfo;
	{
		if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM)
			//	||(pBssDesc->Vender == HT_IOT_PEER_ATHEROS && pBssDesc->SubTypeOfVender == HT_IOT_PEER_ATHEROS_DIR635))
				return true;
	}
	return retValue;
}

// Clear all accumulated IOT workaround state; called when the peer changes.
void HTResetIOTSetting(
	PRT_HIGH_THROUGHPUT		pHTInfo
)
{
	pHTInfo->IOTAction = 0;
	pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
}

/********************************************************************************************************************
 *function:  Construct Capability Element in Beacon... if HTEnable is turned on
 *   input:  struct ieee80211_device*	ieee
 *	     u8*			posHTCap //pointer to store Capability Ele
 *	     u8*			len //store length of CE
 *	     u8				IsEncrypt //whether encrypt, needed further
 *  output:  none
 *  return:  none
 *  notice:  posHTCap can't be null and should be initialized before.
 *	     For EWC (pre-11n) peers the 4-byte EWC OUI prefix is emitted and
 *	     the capability fields start at offset 4.
 * *****************************************************************************************************************/
void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
{
	PRT_HIGH_THROUGHPUT	pHT = ieee->pHTInfo;
	PHT_CAPABILITY_ELE	pCapELE = NULL;
	//u8 bIsDeclareMCS13;

	if ((posHTCap == NULL) || (pHT == NULL))
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
		return;
	}
	memset(posHTCap, 0, *len);
	if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
	{
		u8	EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};	// For 11n EWC definition, 2007.07.17, by Emily
		memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
		pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
	}else
	{
		pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
	}

	//HT capability info
	pCapELE->AdvCoding		= 0;	// This feature is not supported now!!
	// In "half N" mode only 20MHz is advertised.
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
	{
		pCapELE->ChlWidth = 0;
	}
	else
	{
		pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
	}

//	pCapELE->ChlWidth	= (pHT->bRegBW40MHz?1:0);
	pCapELE->MimoPwrSave		= pHT->SelfMimoPs;
	pCapELE->GreenField		= 0;	// This feature is not supported now!!
	pCapELE->ShortGI20Mhz		= 1;	// We can receive Short GI!!
	pCapELE->ShortGI40Mhz		= 1;	// We can receive Short GI!!
	//DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
	//pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
	pCapELE->TxSTBC			= 1;
	pCapELE->RxSTBC			= 0;
	pCapELE->DelayBA		= 0;	// Do not support now!!
	pCapELE->MaxAMSDUSize		= (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
	pCapELE->DssCCk			= ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
	pCapELE->PSMP			= 0;	// Do not support now!!
	pCapELE->LSigTxopProtect	= 0;	// Do not support now!!

	//MAC HT parameters info
	// TODO: Need to take care of this part
	IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);

	if( IsEncrypt)
	{
		pCapELE->MPDUDensity	= 7;	// 8us
		pCapELE->MaxRxAMPDUFactor	= 2;	// 2 is for 32 K and 3 is 64K
	}
	else
	{
		pCapELE->MaxRxAMPDUFactor	= 3;	// 2 is for 32 K and 3 is 64K
		pCapELE->MPDUDensity	= 0;	// no density
	}

	//Supported MCS set
	memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
		pCapELE->MCS[1] &= 0x7f;

	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
		pCapELE->MCS[1] &= 0xbf;

	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
		pCapELE->MCS[1] &= 0x00;

	// 2008.06.12
	// For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
	{
		int i;
		for(i = 1; i< 16; i++)
			pCapELE->MCS[i] = 0;
	}

	//Extended HT Capability Info
	memset(&pCapELE->ExtHTCapInfo, 0, 2);

	//TXBF Capabilities
	memset(pCapELE->TxBFCap, 0, 4);

	//Antenna Selection Capabilities
	pCapELE->ASCap = 0;
	//add 2 to give space for element ID and len when construct frames
	if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
		*len = 30 + 2;
	else
		*len = 26 + 2;

//	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTCap, *len -2);

	//Print each field in detail. Driver should not print out this message by default
//	HTDebugHTCapability(posHTCap, (u8*)"HTConstructCapability()");
	return;
}

/********************************************************************************************************************
 *function:  Construct Information Element in Beacon... if HTEnable is turned on
 *   input:  struct ieee80211_device*	ieee
 *	     u8*			posHTInfo //pointer to store Information Ele
 *	     u8*			len //store len of
 *	     u8				IsEncrypt //whether encrypt, needed further
 *  output:  none
 *  return:  none
 *  notice:  posHTCap can't be null and be initialized before. only AP and IBSS sta should do this
 * *****************************************************************************************************************/
void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 IsEncrypt)
{
	PRT_HIGH_THROUGHPUT	pHT = ieee->pHTInfo;
	PHT_INFORMATION_ELE	pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
	if ((posHTInfo == NULL) || (pHTInfoEle == NULL))
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
		return;
	}

	memset(posHTInfo, 0, *len);
	if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
	{
		pHTInfoEle->ControlChl			= ieee->current_network.channel;
		// Secondary channel goes above the primary on low channels (<=6),
		// below it otherwise; no extension channel when 40MHz is disabled.
		pHTInfoEle->ExtChlOffset		= ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
							(ieee->current_network.channel<=6)?
							HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
		pHTInfoEle->RecommemdedTxWidth	= pHT->bRegBW40MHz;
		pHTInfoEle->RIFS			= 0;
		pHTInfoEle->PSMPAccessOnly		= 0;
		pHTInfoEle->SrvIntGranularity	= 0;
		pHTInfoEle->OptMode			= pHT->CurrentOpMode;
		pHTInfoEle->NonGFDevPresent		= 0;
		pHTInfoEle->DualBeacon		= 0;
		pHTInfoEle->SecondaryBeacon		= 0;
		pHTInfoEle->LSigTxopProtectFull	= 0;
		pHTInfoEle->PcoActive		= 0;
		pHTInfoEle->PcoPhase		= 0;

		memset(pHTInfoEle->BasicMSC, 0, 16);

		*len = 22 + 2; //same above
	}
	else
	{
		//STA should not generate High Throughput Information Element
		*len = 0;
	}
	//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTInfo, *len - 2);
	//HTDebugHTInfo(posHTInfo, "HTConstructInforElement");
	return;
}

/*
 * According to experiment, Realtek AP to STA (based on rtl8190) may achieve best performance
 * if both STA and AP set limitation of aggregation size to 32K, that is, set AMPDU density to 2
 * (Ref: IEEE 11n specification). However, if Realtek STA associates to other AP, STA should set
 * limitation of aggregation size to 8K, otherwise, performance of traffic stream from STA to AP
 * will be much less than the traffic stream from AP to STA if both of the stream runs concurrently
 * at the same time.
 *
 * Frame Format
 * Element ID	Length	OUI	Type1	Reserved
 * 1 byte	1 byte	3 bytes	1 byte	1 byte
 *
 * OUI = 0x00, 0xe0, 0x4c,
 * Type = 0x02
 * Reserved = 0x00
 *
 * 2007.8.21 by Emily
 */
/********************************************************************************************************************
 *function:  Construct Information Element in Beacon...
in RT2RT condition
 *   input:  struct ieee80211_device*	ieee
 *	     u8*			posRT2RTAgg //pointer to store Information Ele
 *	     u8*			len //store len
 *  output:  none
 *  return:  none
 *  notice:  emits the Realtek vendor IE (OUI 00:e0:4c, type 0x02) used to
 *	     negotiate Realtek-to-Realtek aggregation; total IE body is 6 bytes.
 * *****************************************************************************************************************/
void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len)
{
	if (posRT2RTAgg == NULL) {
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
		return;
	}
	memset(posRT2RTAgg, 0, *len);
	*posRT2RTAgg++ = 0x00;
	*posRT2RTAgg++ = 0xe0;
	*posRT2RTAgg++ = 0x4c;
	*posRT2RTAgg++ = 0x02;
	*posRT2RTAgg++ = 0x01;
	*posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;

	if(ieee->bSupportRemoteWakeUp) {
		*posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
	}

	//add 2 to give space for element ID and len when construct frames
	*len = 6 + 2;
	return;
#ifdef TODO
#if(HAL_CODE_BASE == RTL8192 && DEV_BUS_TYPE == USB_INTERFACE)
	/*
	//Emily. If it is required to Ask Realtek AP to send AMPDU during AES mode, enable this section of code.
	if(IS_UNDER_11N_AES_MODE(Adapter))
	{
		posRT2RTAgg->Octet[5] |=RT_HT_CAP_USE_AMPDU;
	}else
	{
		posRT2RTAgg->Octet[5] &= 0xfb;
	}
	*/
#else
	// Do Nothing
#endif

	posRT2RTAgg->Length = 6;
#endif
}

/********************************************************************************************************************
 *function:  Pick the right Rate Adaptive table to use
 *   input:  struct ieee80211_device*	ieee
 *	     u8*			pOperateMCS //A pointer to MCS rate bitmap
 *  return:  always we return true
 *  notice:  masks pOperateMCS in place: legacy modes get no MCS rates at all;
 *	     11n modes are limited by the per-spatial-stream masks.
 * *****************************************************************************************************************/
u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
{
	u8	i;
	if (pOperateMCS == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
		return false;
	}

	switch(ieee->mode)
	{
	case IEEE_A:
	case IEEE_B:
	case IEEE_G:
		//legacy rate routine handled at selectedrate

		//no MCS rate
		for(i=0;i<=15;i++){
			pOperateMCS[i] = 0;
		}
		break;

	case IEEE_N_24G:	//assume CCK rate ok
	case IEEE_N_5G:
		// Legacy part we only use 6, 5.5,2,1 for N_24G and 6 for N_5G.
		// Legacy part shall be handled at SelectRateSet().

		//HT part
		// TODO: may be different if we have different number of antenna
		pOperateMCS[0] &=RATE_ADPT_1SS_MASK;	//support MCS 0~7
		pOperateMCS[1] &=RATE_ADPT_2SS_MASK;
		pOperateMCS[3] &=RATE_ADPT_MCS32_MASK;
		break;

	//should never reach here
	default:
		break;
	}

	return true;
}

/*
 * Description:
 *	This function will get the highest speed rate in input MCS set.
 *
 * /param	Adapter		Pointer to Adapter entity
 *		pMCSRateSet	Pointer to MCS rate bitmap
 *		pMCSFilter	Pointer to MCS rate filter
 *
 * /return	Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
 *
 */
/********************************************************************************************************************
 *function:  This function will get the highest speed rate in input MCS set.
 *   input:  struct ieee80211_device*	ieee
 *	     u8*			pMCSRateSet //Pointer to MCS rate bitmap
 *	     u8*			pMCSFilter //Pointer to MCS rate filter
 *  return:  Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter,
 *	     with bit 7 set (0x80 marks the value as an MCS index); false (0)
 *	     when the filtered set is empty.
 *  notice:
 * *****************************************************************************************************************/
u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter)
{
	u8	i, j;
	u8	bitMap;
	u8	mcsRate = 0;
	u8	availableMcsRate[16];
	if (pMCSRateSet == NULL || pMCSFilter == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
		return false;
	}
	for(i=0; i<16; i++)
		availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];

	for(i = 0; i < 16; i++)
	{
		if(availableMcsRate[i] != 0)
			break;
	}
	if(i == 16)
		return false;

	// Walk every set bit; keep the MCS index whose PHY data rate is highest.
	for(i = 0; i < 16; i++)
	{
		if(availableMcsRate[i] != 0)
		{
			bitMap = availableMcsRate[i];
			for(j = 0; j < 8; j++)
			{
				if((bitMap%2) != 0)
				{
					if(HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
						mcsRate = (8*i+j);
				}
				bitMap = bitMap>>1;
			}
		}
	}
	return (mcsRate|0x80);
}

/*
**
**1.Filter our operation rate set with AP's rate set
**2.shall reference channel bandwidth, STBC, Antenna number
**3.generate rate adaptive table for firmware
**David 20060906
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperateMCS)
{
	u8 i=0;

	// filter out operational rate set not supported by AP, the length of it is 16
	for(i=0;i<=15;i++){
		pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
	}

	// TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number
	// TODO: fill suggested rate adaptive rate index and give firmware info using Tx command packet
	// we also shall suggested the first start rate set according to our signal strength
	HT_PickMCSRate(ieee, pOperateMCS);

	// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pOperateMCS[1] = 0;

	//
	// For RTL819X, we support only MCS0~15.
	// And also, we do not know how to use MCS32 now.
	//
	for(i=2; i<=15; i++)
		pOperateMCS[i] = 0;

	return true;
}

void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
#if 0
//I need move this function to other places, such as rx?
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) void HTOnAssocRsp_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, ht_onAssRsp); #else void HTOnAssocRsp_wq(struct ieee80211_device *ieee) { #endif #endif void HTOnAssocRsp(struct ieee80211_device *ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; PHT_CAPABILITY_ELE pPeerHTCap = NULL; PHT_INFORMATION_ELE pPeerHTInfo = NULL; u16 nMaxAMSDUSize = 0; u8* pMcsFilter = NULL; static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily if( pHTInfo->bCurrentHTSupport == false ) { IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n"); return; } IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n"); // IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTCapBuf, sizeof(HT_CAPABILITY_ELE)); // IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTInfoBuf, sizeof(HT_INFORMATION_ELE)); // HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq"); // HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq"); // if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap))) pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]); else pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf); if(!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo))) pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]); else pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf); //////////////////////////////////////////////////////// // Configurations: //////////////////////////////////////////////////////// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE)); // IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE)); // Config Supported Channel Width setting // HTSetConnectBwMode(ieee, 
(HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset)); // if(pHTInfo->bCurBW40MHz == true) pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false); // // Update short GI/ long GI setting // // TODO: pHTInfo->bCurShortGI20MHz= ((pHTInfo->bRegShortGI20MHz)?((pPeerHTCap->ShortGI20Mhz==1)?true:false):false); pHTInfo->bCurShortGI40MHz= ((pHTInfo->bRegShortGI40MHz)?((pPeerHTCap->ShortGI40Mhz==1)?true:false):false); // // Config TX STBC setting // // TODO: // // Config DSSS/CCK mode in 40MHz mode // // TODO: pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK)?((pPeerHTCap->DssCCk==1)?true:false):false); // // Config and configure A-MSDU setting // pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; if (ieee->rtllib_ap_sec_type && (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP))){ if( (pHTInfo->IOTPeer== HT_IOT_PEER_ATHEROS) || (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN) ) pHTInfo->bCurrentAMPDUEnable = false; } nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize==0)?3839:7935; if(pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize ) pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize; else pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; // // Config A-MPDU setting // pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; // <1> Decide AMPDU Factor // By Emily if(!pHTInfo->bRegRT2RTAggregation) { // Decide AMPDU Factor according to protocol handshake if(pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor) pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; }else { // Set MPDU density to 2 to Realtek AP, and set it to 0 for others // Replace MPDU factor declared in original association response frame format. 
2007.08.20 by Emily #if 0 osTmp= PacketGetElement( asocpdu, EID_Vendor, OUI_SUB_REALTEK_AGG, OUI_SUBTYPE_DONT_CARE); if(osTmp.Length >= 5) //00:e0:4c:02:00 #endif if (ieee->current_network.bssht.bdRT2RTAggregation) { if( ieee->pairwise_key_type != KEY_TYPE_NA) // Realtek may set 32k in security mode and 64k for others pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K; }else { if(pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K) pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor; else pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K; } } // <2> Set AMPDU Minimum MPDU Start Spacing // 802.11n 3.0 section 9.7d.3 #if 1 if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity) pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; else pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity; if(ieee->pairwise_key_type != KEY_TYPE_NA ) pHTInfo->CurrentMPDUDensity = 7; // 8us #else if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity) pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; else pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity; #endif // Force TX AMSDU // Lanhsin: mark for tmp to avoid deauth by ap from s3 //if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0) if(0) { pHTInfo->bCurrentAMPDUEnable = false; pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE; pHTInfo->ForcedAMSDUMaxSize = 7935; pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K; } // Rx Reorder Setting pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable; // // Filter out unsupported HT rate for this AP // Update RATR table // This is only for 8190 ,8192 or later product which using firmware to handle rate adaptive mechanism. // // Handle Ralink AP bad MCS rate set condition. Joseph. // This fix the bug of Ralink AP. This may be removed in the future. 
if(pPeerHTCap->MCS[0] == 0) pPeerHTCap->MCS[0] = 0xff; HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet); // // Config MIMO Power Save setting // pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave; if(pHTInfo->PeerMimoPs == MIMO_PS_STATIC) pMcsFilter = MCS_FILTER_1SS; else pMcsFilter = MCS_FILTER_ALL; //WB add for MCS8 bug // pMcsFilter = MCS_FILTER_1SS; ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; // // Config current operation mode. // pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; } void HTSetConnectBwModeCallback(struct ieee80211_device* ieee); /******************************************************************************************************************** *function: initialize HT info(struct PRT_HIGH_THROUGHPUT) * input: struct ieee80211_device* ieee * output: none * return: none * notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP ********************************************************************************************************************/ // TODO: Should this funciton be called when receiving of Disassociation? void HTInitializeHTInfo(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // // These parameters will be reset when receiving deauthentication packet // IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __FUNCTION__); pHTInfo->bCurrentHTSupport = false; // 40MHz channel support pHTInfo->bCurBW40MHz = false; pHTInfo->bCurTxBW40MHz = false; // Short GI support pHTInfo->bCurShortGI20MHz = false; pHTInfo->bCurShortGI40MHz = false; pHTInfo->bForcedShortGI = false; // CCK rate support // This flag is set to true to support CCK rate by default. // It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities only when associate to // 11N BSS. 
pHTInfo->bCurSuppCCK = true; // AMSDU related pHTInfo->bCurrent_AMSDU_Support = false; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; // AMPUD related pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; // Initialize all of the parameters related to 11n memset((void*)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap)); memset((void*)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo)); memset((void*)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf)); memset((void*)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf)); pHTInfo->bSwBwInProgress = false; pHTInfo->ChnlOp = CHNLOP_NONE; // Set default IEEE spec for Draft N pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE; // Realtek proprietary aggregation mode pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->IOTPeer = 0; pHTInfo->IOTAction = 0; //MCS rate initialized here { u8* RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]); RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7 RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15 RegHTSuppRateSets[4] = 0x01; //support MCS 32 } } /******************************************************************************************************************** *function: initialize Bss HT structure(struct PBSS_HT) * input: PBSS_HT pBssHT //to be initialized * output: none * return: none * notice: This function is called when initialize network structure ********************************************************************************************************************/ void HTInitializeBssDesc(PBSS_HT pBssHT) { pBssHT->bdSupportHT = false; memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf)); pBssHT->bdHTCapLen = 0; memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf)); pBssHT->bdHTInfoLen = 0; pBssHT->bdHTSpecVer= HT_SPEC_VER_IEEE; pBssHT->bdRT2RTAggregation = false; pBssHT->bdRT2RTLongSlotTime = false; } #if 0 //below function has merged into 
ieee80211_network_init() in ieee80211_rx.c void HTParsingHTCapElement( IN PADAPTER Adapter, IN OCTET_STRING HTCapIE, OUT PRT_WLAN_BSS pBssDesc ) { PMGNT_INFO pMgntInfo = &Adapter->MgntInfo; if( HTCapIE.Length > sizeof(pBssDesc->BssHT.bdHTCapBuf) ) { RT_TRACE( COMP_HT, DBG_LOUD, ("HTParsingHTCapElement(): HT Capability Element length is too long!\n") ); return; } // TODO: Check the correctness of HT Cap //Print each field in detail. Driver should not print out this message by default if(!pMgntInfo->mActingAsAp && !pMgntInfo->mAssoc) HTDebugHTCapability(DBG_TRACE, Adapter, &HTCapIE, (pu8)"HTParsingHTCapElement()"); HTCapIE.Length = HTCapIE.Length > sizeof(pBssDesc->BssHT.bdHTCapBuf)?\ sizeof(pBssDesc->BssHT.bdHTCapBuf):HTCapIE.Length; //prevent from overflow CopyMem(pBssDesc->BssHT.bdHTCapBuf, HTCapIE.Octet, HTCapIE.Length); pBssDesc->BssHT.bdHTCapLen = HTCapIE.Length; } void HTParsingHTInfoElement( PADAPTER Adapter, OCTET_STRING HTInfoIE, PRT_WLAN_BSS pBssDesc ) { PMGNT_INFO pMgntInfo = &Adapter->MgntInfo; if( HTInfoIE.Length > sizeof(pBssDesc->BssHT.bdHTInfoBuf)) { RT_TRACE( COMP_HT, DBG_LOUD, ("HTParsingHTInfoElement(): HT Information Element length is too long!\n") ); return; } // TODO: Check the correctness of HT Info //Print each field in detail. 
Driver should not print out this message by default if(!pMgntInfo->mActingAsAp && !pMgntInfo->mAssoc) HTDebugHTInfo(DBG_TRACE, Adapter, &HTInfoIE, (pu8)"HTParsingHTInfoElement()"); HTInfoIE.Length = HTInfoIE.Length > sizeof(pBssDesc->BssHT.bdHTInfoBuf)?\ sizeof(pBssDesc->BssHT.bdHTInfoBuf):HTInfoIE.Length; //prevent from overflow CopyMem( pBssDesc->BssHT.bdHTInfoBuf, HTInfoIE.Octet, HTInfoIE.Length); pBssDesc->BssHT.bdHTInfoLen = HTInfoIE.Length; } /* * Get HT related information from beacon and save it in BssDesc * * (1) Parse HTCap, and HTInfo, and record whether it is 11n AP * (2) If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() * (3) Check whether peer is Realtek AP (for Realtek proprietary aggregation mode). * Input: * PADAPTER Adapter * * Output: * PRT_TCB BssDesc * */ void HTGetValueFromBeaconOrProbeRsp( PADAPTER Adapter, POCTET_STRING pSRCmmpdu, PRT_WLAN_BSS bssDesc ) { PMGNT_INFO pMgntInfo = &Adapter->MgntInfo; PRT_HIGH_THROUGHPUT pHTInfo = GET_HT_INFO(pMgntInfo); OCTET_STRING HTCapIE, HTInfoIE, HTRealtekAgg, mmpdu; OCTET_STRING BroadcomElement, CiscoElement; mmpdu.Octet = pSRCmmpdu->Octet; mmpdu.Length = pSRCmmpdu->Length; //2Note: // Mark for IOT testing using Linksys WRT350N, This AP does not contain WMM IE when // it is configured at pure-N mode. 
// if(bssDesc->BssQos.bdQoSMode & QOS_WMM) // HTInitializeBssDesc (&bssDesc->BssHT); //2<1> Parse HTCap, and HTInfo // Get HT Capability IE: (1) Get IEEE Draft N IE or (2) Get EWC IE HTCapIE = PacketGetElement(mmpdu, EID_HTCapability, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE); if(HTCapIE.Length == 0) { HTCapIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_CAP, OUI_SUBTYPE_DONT_CARE); if(HTCapIE.Length != 0) bssDesc->BssHT.bdHTSpecVer= HT_SPEC_VER_EWC; } if(HTCapIE.Length != 0) HTParsingHTCapElement(Adapter, HTCapIE, bssDesc); // Get HT Information IE: (1) Get IEEE Draft N IE or (2) Get EWC IE HTInfoIE = PacketGetElement(mmpdu, EID_HTInfo, OUI_SUB_DONT_CARE, OUI_SUBTYPE_DONT_CARE); if(HTInfoIE.Length == 0) { HTInfoIE = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_11N_EWC_HT_INFO, OUI_SUBTYPE_DONT_CARE); if(HTInfoIE.Length != 0) bssDesc->BssHT.bdHTSpecVer = HT_SPEC_VER_EWC; } if(HTInfoIE.Length != 0) HTParsingHTInfoElement(Adapter, HTInfoIE, bssDesc); //2<2>If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() if(HTCapIE.Length != 0) { bssDesc->BssHT.bdSupportHT = true; if(bssDesc->BssQos.bdQoSMode == QOS_DISABLE) QosSetLegacyWMMParamWithHT(Adapter, bssDesc); } else { bssDesc->BssHT.bdSupportHT = false; } //2<3>Check whether the peer is Realtek AP/STA if(pHTInfo->bRegRT2RTAggregation) { if(bssDesc->BssHT.bdSupportHT) { HTRealtekAgg = PacketGetElement(mmpdu, EID_Vendor, OUI_SUB_REALTEK_AGG, OUI_SUBTYPE_DONT_CARE); if(HTRealtekAgg.Length >=5 ) { bssDesc->BssHT.bdRT2RTAggregation = true; if((HTRealtekAgg.Octet[4]==1) && (HTRealtekAgg.Octet[5] & 0x02)) bssDesc->BssHT.bdRT2RTLongSlotTime = true; } } } // // 2008/01/25 MH Get Broadcom AP IE for manamgent frame CCK rate problem. // AP can not receive CCK managemtn from from 92E. // // Initialize every new bss broadcom cap exist as false.. 
bssDesc->bBroadcomCapExist= false;

	if(HTCapIE.Length != 0 || HTInfoIE.Length != 0)
	{
		u4Byte	Length = 0;
		FillOctetString(BroadcomElement, NULL, 0);

		BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_1, OUI_SUBTYPE_DONT_CARE);
		Length += BroadcomElement.Length;
		BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_2, OUI_SUBTYPE_DONT_CARE);
		Length += BroadcomElement.Length;
		BroadcomElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_BROADCOM_IE_3, OUI_SUBTYPE_DONT_CARE);
		Length += BroadcomElement.Length;

		if(Length > 0)
			bssDesc->bBroadcomCapExist = true;
	}

	// For Cisco IOT issue
	CiscoElement = PacketGetElement( mmpdu, EID_Vendor, OUI_SUB_CISCO_IE, OUI_SUBTYPE_DONT_CARE);
	if(CiscoElement.Length != 0){	// 3: 0x00, 0x40, 0x96 ....
		bssDesc->bCiscoCapExist = true;
	}else{
		bssDesc->bCiscoCapExist = false;
	}
}
#endif

/********************************************************************************************************************
 *function:  initialize Bss HT structure(struct PBSS_HT)
 *   input:  struct ieee80211_device	*ieee
 *	     struct ieee80211_network	*pNetwork //usually current network we are live in
 *  output:  none
 *  return:  none
 *  notice: This function should ONLY be called before association.
 *	    It copies the peer's buffered HT IEs into pHTInfo, determines the
 *	    IOT peer vendor, and derives the per-peer IOTAction workaround bits.
 ********************************************************************************************************************/
void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee,	struct ieee80211_network * pNetwork)
{
	PRT_HIGH_THROUGHPUT		pHTInfo = ieee->pHTInfo;
//	u16	nMaxAMSDUSize;
//	PHT_CAPABILITY_ELE		pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
//	PHT_INFORMATION_ELE		pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
//	u8*	pMcsFilter;
	u8	bIOTAction = 0;

	//
	//  Save Peer Setting before Association
	//
	IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __FUNCTION__);
	/*unmark bEnableHT flag here is the same reason why unmarked in function ieee80211_softmac_new_net. WB 2008.09.10*/
//	if( pHTInfo->bEnableHT &&  pNetwork->bssht.bdSupportHT)
	if (pNetwork->bssht.bdSupportHT)
	{
		pHTInfo->bCurrentHTSupport = true;
		pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;

		// Save HTCap and HTInfo information Element
		// (length-checked against the destination buffers to prevent overflow)
		if(pNetwork->bssht.bdHTCapLen > 0 &&	pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
			memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);

		if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
			memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen);

		// Check whether RT to RT aggregation mode is enabled
		if(pHTInfo->bRegRT2RTAggregation)
		{
			pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation;
			pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime;
		}
		else
		{
			pHTInfo->bCurrentRT2RTAggregation = false;
			pHTInfo->bCurrentRT2RTLongSlotTime = false;
		}

		// Determine the IOT Peer Vendor.
		HTIOTPeerDetermine(ieee);

		// Decide IOT Action
		// Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
		pHTInfo->IOTAction = 0;
		bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;

		bIOTAction = HTIOTActIsForcedCTS2Self(ieee, pNetwork);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;

		bIOTAction = HTIOTActIsDisableMCS15(ieee);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;

		bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee, pNetwork->bssid);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;

		bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;

		bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;

		bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
		if(bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;

		bIOTAction = 
HTIOTActIsNullDataPowerSaving(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_NULL_DATA_POWER_SAVING; } else { pHTInfo->bCurrentHTSupport = false; pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->IOTAction = 0; } } void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf; PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf; if(pHTInfo->bCurrentHTSupport) { // // Config current operation mode. // if(pNetwork->bssht.bdHTInfoLen != 0) pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; // // <TODO: Config according to OBSS non-HT STA present!!> // } } void HTUseDefaultSetting(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // u8 regBwOpMode; if(pHTInfo->bEnableHT) { pHTInfo->bCurrentHTSupport = true; pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK; pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz; pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz; pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz; pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity; // Set BWOpMode register //update RATR index0 HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet); //function below is not implemented at all. 
WB
#ifdef TODO
		Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet);
#endif
		ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL);
		ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
	}
	else
	{
		pHTInfo->bCurrentHTSupport = false;
	}
	return;
}

/********************************************************************************************************************
 *function:  check whether HT control field exists
 *   input:  struct ieee80211_device	*ieee
 *	     u8*			pFrame //coming skb->data
 *  output:  none
 *  return:  return true if HT control field exists(false otherwise)
 *  notice:  only meaningful while HT is active; a QoS data frame with the
 *	     Order bit set carries the +HTC field.
 ********************************************************************************************************************/
u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame)
{
	if(ieee->pHTInfo->bCurrentHTSupport)
	{
		// NOTE(review): the parentheses compare the whole && result to 1;
		// since && already yields 0/1 this is equivalent to the (likely
		// intended) plain "IsQoSDataFrame(pFrame) && Frame_Order(pFrame)".
		if( (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1)
		{
			IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
			return true;
		}
	}
	return false;
}

//
// This function set bandwidth mode in protocol layer.
//
void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
//	u32 flags = 0;

	if(pHTInfo->bRegBW40MHz == false)
		return;

	// To reduce dummy operation
//	if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) ||
//	   (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset))
//		return;

//	spin_lock_irqsave(&(ieee->bw_spinlock), flags);
	if(pHTInfo->bSwBwInProgress) {
//		spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
		return;
	}
	//if in half N mode, set to 20M bandwidth please 09.08.2008 WB.
	if(Bandwidth==HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)))
	{
		// Handle Illegal extention channel offset!!
		// Channel 1 has no lower secondary channel.
		if(ieee->current_network.channel<2 && Offset==HT_EXTCHNL_OFFSET_LOWER)
			Offset = HT_EXTCHNL_OFFSET_NO_EXT;
		if(Offset==HT_EXTCHNL_OFFSET_UPPER || Offset==HT_EXTCHNL_OFFSET_LOWER) {
			pHTInfo->bCurBW40MHz = true;
			pHTInfo->CurSTAExtChnlOffset = Offset;
		} else {
			pHTInfo->bCurBW40MHz = false;
			pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
		}
	} else {
		pHTInfo->bCurBW40MHz = false;
		pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
	}

	pHTInfo->bSwBwInProgress = true;

	// TODO: 2007.7.13 by Emily Wait 2000ms  in order to garantee that switching
	//   bandwidth is executed after scan is finished. It is a temporal solution
	//   because software should ganrantee the last operation of switching bandwidth
	//   is executed properlly.
	HTSetConnectBwModeCallback(ieee);

//	spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
}

// Apply the pending bandwidth switch: retune to the primary/secondary channel
// and program the HAL bandwidth mode, then clear the in-progress flag.
void HTSetConnectBwModeCallback(struct ieee80211_device* ieee)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

	IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __FUNCTION__);

	if(pHTInfo->bCurBW40MHz)
	{
		// Center the 40MHz channel: +2/-2 from the control channel.
		if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_UPPER)
			ieee->set_chan(ieee->dev, ieee->current_network.channel+2);
		else if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_LOWER)
			ieee->set_chan(ieee->dev, ieee->current_network.channel-2);
		else
			ieee->set_chan(ieee->dev, ieee->current_network.channel);

		ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset);
	} else {
		ieee->set_chan(ieee->dev, ieee->current_network.channel);
		ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
	}

	pHTInfo->bSwBwInProgress = false;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
//EXPORT_SYMBOL_NOVERS(HTUpdateSelfAndPeerSetting);
#else
//EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
#endif
gpl-2.0
faux123/pyramid-2.6.35_sense
drivers/media/dvb/firewire/firedtv-fw.c
807
7915
/* * FireDTV driver -- firewire I/O backend */ #include <linux/device.h> #include <linux/errno.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/page.h> #include <dvb_demux.h> #include "firedtv.h" static LIST_HEAD(node_list); static DEFINE_SPINLOCK(node_list_lock); static inline struct fw_device *device_of(struct firedtv *fdtv) { return fw_device(fdtv->device->parent); } static int node_req(struct firedtv *fdtv, u64 addr, void *data, size_t len, int tcode) { struct fw_device *device = device_of(fdtv); int rcode, generation = device->generation; smp_rmb(); /* node_id vs. generation */ rcode = fw_run_transaction(device->card, tcode, device->node_id, generation, device->max_speed, addr, data, len); return rcode != RCODE_COMPLETE ? -EIO : 0; } static int node_lock(struct firedtv *fdtv, u64 addr, void *data) { return node_req(fdtv, addr, data, 8, TCODE_LOCK_COMPARE_SWAP); } static int node_read(struct firedtv *fdtv, u64 addr, void *data) { return node_req(fdtv, addr, data, 4, TCODE_READ_QUADLET_REQUEST); } static int node_write(struct firedtv *fdtv, u64 addr, void *data, size_t len) { return node_req(fdtv, addr, data, len, TCODE_WRITE_BLOCK_REQUEST); } #define ISO_HEADER_SIZE 4 #define CIP_HEADER_SIZE 8 #define MPEG2_TS_HEADER_SIZE 4 #define MPEG2_TS_SOURCE_PACKET_SIZE (4 + 188) #define MAX_PACKET_SIZE 1024 /* 776, rounded up to 2^n */ #define PACKETS_PER_PAGE (PAGE_SIZE / MAX_PACKET_SIZE) #define N_PACKETS 64 /* buffer size */ #define N_PAGES DIV_ROUND_UP(N_PACKETS, PACKETS_PER_PAGE) #define IRQ_INTERVAL 16 struct firedtv_receive_context { struct fw_iso_context *context; struct fw_iso_buffer buffer; int interrupt_packet; int current_packet; char *pages[N_PAGES]; }; static int queue_iso(struct firedtv_receive_context *ctx, int index) { struct fw_iso_packet p; p.payload_length = 
MAX_PACKET_SIZE; p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1)); p.skip = 0; p.header_length = ISO_HEADER_SIZE; return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, index * MAX_PACKET_SIZE); } static void handle_iso(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct firedtv *fdtv = data; struct firedtv_receive_context *ctx = fdtv->backend_data; __be32 *h, *h_end; int length, err, i = ctx->current_packet; char *p, *p_end; for (h = header, h_end = h + header_length / 4; h < h_end; h++) { length = be32_to_cpup(h) >> 16; if (unlikely(length > MAX_PACKET_SIZE)) { dev_err(fdtv->device, "length = %d\n", length); length = MAX_PACKET_SIZE; } p = ctx->pages[i / PACKETS_PER_PAGE] + (i % PACKETS_PER_PAGE) * MAX_PACKET_SIZE; p_end = p + length; for (p += CIP_HEADER_SIZE + MPEG2_TS_HEADER_SIZE; p < p_end; p += MPEG2_TS_SOURCE_PACKET_SIZE) dvb_dmx_swfilter_packets(&fdtv->demux, p, 1); err = queue_iso(ctx, i); if (unlikely(err)) dev_err(fdtv->device, "requeue failed\n"); i = (i + 1) & (N_PACKETS - 1); } ctx->current_packet = i; } static int start_iso(struct firedtv *fdtv) { struct firedtv_receive_context *ctx; struct fw_device *device = device_of(fdtv); int i, err; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->context = fw_iso_context_create(device->card, FW_ISO_CONTEXT_RECEIVE, fdtv->isochannel, device->max_speed, ISO_HEADER_SIZE, handle_iso, fdtv); if (IS_ERR(ctx->context)) { err = PTR_ERR(ctx->context); goto fail_free; } err = fw_iso_buffer_init(&ctx->buffer, device->card, N_PAGES, DMA_FROM_DEVICE); if (err) goto fail_context_destroy; ctx->interrupt_packet = 0; ctx->current_packet = 0; for (i = 0; i < N_PAGES; i++) ctx->pages[i] = page_address(ctx->buffer.pages[i]); for (i = 0; i < N_PACKETS; i++) { err = queue_iso(ctx, i); if (err) goto fail; } err = fw_iso_context_start(ctx->context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS); if (err) goto fail; fdtv->backend_data = ctx; return 0; 
fail: fw_iso_buffer_destroy(&ctx->buffer, device->card); fail_context_destroy: fw_iso_context_destroy(ctx->context); fail_free: kfree(ctx); return err; } static void stop_iso(struct firedtv *fdtv) { struct firedtv_receive_context *ctx = fdtv->backend_data; fw_iso_context_stop(ctx->context); fw_iso_buffer_destroy(&ctx->buffer, device_of(fdtv)->card); fw_iso_context_destroy(ctx->context); kfree(ctx); } static const struct firedtv_backend backend = { .lock = node_lock, .read = node_read, .write = node_write, .start_iso = start_iso, .stop_iso = stop_iso, }; static void handle_fcp(struct fw_card *card, struct fw_request *request, int tcode, int destination, int source, int generation, int speed, unsigned long long offset, void *payload, size_t length, void *callback_data) { struct firedtv *f, *fdtv = NULL; struct fw_device *device; unsigned long flags; int su; if (length < 2 || (((u8 *)payload)[0] & 0xf0) != 0) return; su = ((u8 *)payload)[1] & 0x7; spin_lock_irqsave(&node_list_lock, flags); list_for_each_entry(f, &node_list, list) { device = device_of(f); if (device->generation != generation) continue; smp_rmb(); /* node_id vs. generation */ if (device->card == card && device->node_id == source && (f->subunit == su || (f->subunit == 0 && su == 0x7))) { fdtv = f; break; } } spin_unlock_irqrestore(&node_list_lock, flags); if (fdtv) avc_recv(fdtv, payload, length); } static struct fw_address_handler fcp_handler = { .length = CSR_FCP_END - CSR_FCP_RESPONSE, .address_callback = handle_fcp, }; static const struct fw_address_region fcp_region = { .start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE, .end = CSR_REGISTER_BASE + CSR_FCP_END, }; /* Adjust the template string if models with longer names appear. 
*/ #define MAX_MODEL_NAME_LEN sizeof("FireDTV ????") static int node_probe(struct device *dev) { struct firedtv *fdtv; char name[MAX_MODEL_NAME_LEN]; int name_len, err; name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL, name, sizeof(name)); fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0); if (!fdtv) return -ENOMEM; err = fdtv_register_rc(fdtv, dev); if (err) goto fail_free; spin_lock_irq(&node_list_lock); list_add_tail(&fdtv->list, &node_list); spin_unlock_irq(&node_list_lock); err = avc_identify_subunit(fdtv); if (err) goto fail; err = fdtv_dvb_register(fdtv); if (err) goto fail; avc_register_remote_control(fdtv); return 0; fail: spin_lock_irq(&node_list_lock); list_del(&fdtv->list); spin_unlock_irq(&node_list_lock); fdtv_unregister_rc(fdtv); fail_free: kfree(fdtv); return err; } static int node_remove(struct device *dev) { struct firedtv *fdtv = dev_get_drvdata(dev); fdtv_dvb_unregister(fdtv); spin_lock_irq(&node_list_lock); list_del(&fdtv->list); spin_unlock_irq(&node_list_lock); fdtv_unregister_rc(fdtv); kfree(fdtv); return 0; } static void node_update(struct fw_unit *unit) { struct firedtv *fdtv = dev_get_drvdata(&unit->device); if (fdtv->isochannel >= 0) cmp_establish_pp_connection(fdtv, fdtv->subunit, fdtv->isochannel); } static struct fw_driver fdtv_driver = { .driver = { .owner = THIS_MODULE, .name = "firedtv", .bus = &fw_bus_type, .probe = node_probe, .remove = node_remove, }, .update = node_update, .id_table = fdtv_id_table, }; int __init fdtv_fw_init(void) { int ret; ret = fw_core_add_address_handler(&fcp_handler, &fcp_region); if (ret < 0) return ret; return driver_register(&fdtv_driver.driver); } void fdtv_fw_exit(void) { driver_unregister(&fdtv_driver.driver); fw_core_remove_address_handler(&fcp_handler); }
gpl-2.0
XCage15/linux
drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
1319
3552
/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2015 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifdef CONFIG_CHELSIO_T4_FCOE #include <scsi/fc/fc_fs.h> #include <scsi/libfcoe.h> #include "cxgb4.h" bool cxgb_fcoe_sof_eof_supported(struct adapter *adap, struct sk_buff *skb) { struct fcoe_hdr *fcoeh = (struct fcoe_hdr *)skb_network_header(skb); u8 sof = fcoeh->fcoe_sof; u8 eof = 0; if ((sof != FC_SOF_I3) && (sof != FC_SOF_N3)) { dev_err(adap->pdev_dev, "Unsupported SOF 0x%x\n", sof); return false; } skb_copy_bits(skb, skb->len - 4, &eof, 1); if ((eof != FC_EOF_N) && (eof != FC_EOF_T)) { dev_err(adap->pdev_dev, "Unsupported EOF 0x%x\n", eof); return false; } return true; } /** * cxgb_fcoe_enable - enable FCoE offload features * @netdev: net device * * Returns 0 on success or -EINVAL on failure. */ int cxgb_fcoe_enable(struct net_device *netdev) { struct port_info *pi = netdev_priv(netdev); struct adapter *adap = pi->adapter; struct cxgb_fcoe *fcoe = &pi->fcoe; if (is_t4(adap->params.chip)) return -EINVAL; if (!(adap->flags & FULL_INIT_DONE)) return -EINVAL; dev_info(adap->pdev_dev, "Enabling FCoE offload features\n"); netdev->features |= NETIF_F_FCOE_CRC; netdev->vlan_features |= NETIF_F_FCOE_CRC; netdev->features |= NETIF_F_FCOE_MTU; netdev->vlan_features |= NETIF_F_FCOE_MTU; netdev_features_change(netdev); fcoe->flags |= CXGB_FCOE_ENABLED; return 0; } /** * cxgb_fcoe_disable - disable FCoE offload * @netdev: net device * * Returns 0 on success or -EINVAL on failure. */ int cxgb_fcoe_disable(struct net_device *netdev) { struct port_info *pi = netdev_priv(netdev); struct adapter *adap = pi->adapter; struct cxgb_fcoe *fcoe = &pi->fcoe; if (!(fcoe->flags & CXGB_FCOE_ENABLED)) return -EINVAL; dev_info(adap->pdev_dev, "Disabling FCoE offload features\n"); fcoe->flags &= ~CXGB_FCOE_ENABLED; netdev->features &= ~NETIF_F_FCOE_CRC; netdev->vlan_features &= ~NETIF_F_FCOE_CRC; netdev->features &= ~NETIF_F_FCOE_MTU; netdev->vlan_features &= ~NETIF_F_FCOE_MTU; netdev_features_change(netdev); return 0; } #endif /* CONFIG_CHELSIO_T4_FCOE */
gpl-2.0
hannesweisbach/linux-atlas
drivers/clk/ti/clk-2xxx.c
1319
9260
/* * OMAP2 Clock init * * Copyright (C) 2013 Texas Instruments, Inc * Tero Kristo (t-kristo@ti.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/clk.h> #include <linux/clk/ti.h> #include "clock.h" static struct ti_dt_clk omap2xxx_clks[] = { DT_CLK(NULL, "func_32k_ck", "func_32k_ck"), DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"), DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"), DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"), DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"), DT_CLK(NULL, "virt_26m_ck", "virt_26m_ck"), DT_CLK(NULL, "aplls_clkin_ck", "aplls_clkin_ck"), DT_CLK(NULL, "aplls_clkin_x2_ck", "aplls_clkin_x2_ck"), DT_CLK(NULL, "osc_ck", "osc_ck"), DT_CLK(NULL, "sys_ck", "sys_ck"), DT_CLK(NULL, "alt_ck", "alt_ck"), DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"), DT_CLK(NULL, "dpll_ck", "dpll_ck"), DT_CLK(NULL, "apll96_ck", "apll96_ck"), DT_CLK(NULL, "apll54_ck", "apll54_ck"), DT_CLK(NULL, "func_54m_ck", "func_54m_ck"), DT_CLK(NULL, "core_ck", "core_ck"), DT_CLK(NULL, "func_96m_ck", "func_96m_ck"), DT_CLK(NULL, "func_48m_ck", "func_48m_ck"), DT_CLK(NULL, "func_12m_ck", "func_12m_ck"), DT_CLK(NULL, "sys_clkout_src", "sys_clkout_src"), DT_CLK(NULL, "sys_clkout", "sys_clkout"), DT_CLK(NULL, "emul_ck", "emul_ck"), DT_CLK(NULL, "mpu_ck", "mpu_ck"), DT_CLK(NULL, "dsp_fck", "dsp_fck"), DT_CLK(NULL, "gfx_3d_fck", "gfx_3d_fck"), DT_CLK(NULL, "gfx_2d_fck", "gfx_2d_fck"), DT_CLK(NULL, "gfx_ick", "gfx_ick"), DT_CLK("omapdss_dss", "ick", "dss_ick"), DT_CLK(NULL, "dss_ick", "dss_ick"), DT_CLK(NULL, "dss1_fck", "dss1_fck"), 
DT_CLK(NULL, "dss2_fck", "dss2_fck"), DT_CLK(NULL, "dss_54m_fck", "dss_54m_fck"), DT_CLK(NULL, "core_l3_ck", "core_l3_ck"), DT_CLK(NULL, "ssi_fck", "ssi_ssr_sst_fck"), DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"), DT_CLK(NULL, "l4_ck", "l4_ck"), DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"), DT_CLK(NULL, "gpt1_ick", "gpt1_ick"), DT_CLK(NULL, "gpt1_fck", "gpt1_fck"), DT_CLK(NULL, "gpt2_ick", "gpt2_ick"), DT_CLK(NULL, "gpt2_fck", "gpt2_fck"), DT_CLK(NULL, "gpt3_ick", "gpt3_ick"), DT_CLK(NULL, "gpt3_fck", "gpt3_fck"), DT_CLK(NULL, "gpt4_ick", "gpt4_ick"), DT_CLK(NULL, "gpt4_fck", "gpt4_fck"), DT_CLK(NULL, "gpt5_ick", "gpt5_ick"), DT_CLK(NULL, "gpt5_fck", "gpt5_fck"), DT_CLK(NULL, "gpt6_ick", "gpt6_ick"), DT_CLK(NULL, "gpt6_fck", "gpt6_fck"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), DT_CLK(NULL, "gpt7_fck", "gpt7_fck"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt8_fck", "gpt8_fck"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt9_fck", "gpt9_fck"), DT_CLK(NULL, "gpt10_ick", "gpt10_ick"), DT_CLK(NULL, "gpt10_fck", "gpt10_fck"), DT_CLK(NULL, "gpt11_ick", "gpt11_ick"), DT_CLK(NULL, "gpt11_fck", "gpt11_fck"), DT_CLK(NULL, "gpt12_ick", "gpt12_ick"), DT_CLK(NULL, "gpt12_fck", "gpt12_fck"), DT_CLK("omap-mcbsp.1", "ick", "mcbsp1_ick"), DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"), DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"), DT_CLK("omap-mcbsp.2", "ick", "mcbsp2_ick"), DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"), DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"), DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"), DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"), DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"), DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"), DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"), DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"), DT_CLK(NULL, "uart1_ick", "uart1_ick"), DT_CLK(NULL, "uart1_fck", "uart1_fck"), DT_CLK(NULL, "uart2_ick", "uart2_ick"), DT_CLK(NULL, "uart2_fck", "uart2_fck"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), DT_CLK(NULL, "uart3_fck", "uart3_fck"), DT_CLK(NULL, "gpios_ick", 
"gpios_ick"), DT_CLK(NULL, "gpios_fck", "gpios_fck"), DT_CLK("omap_wdt", "ick", "mpu_wdt_ick"), DT_CLK(NULL, "mpu_wdt_ick", "mpu_wdt_ick"), DT_CLK(NULL, "mpu_wdt_fck", "mpu_wdt_fck"), DT_CLK(NULL, "sync_32k_ick", "sync_32k_ick"), DT_CLK(NULL, "wdt1_ick", "wdt1_ick"), DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"), DT_CLK("omap24xxcam", "fck", "cam_fck"), DT_CLK(NULL, "cam_fck", "cam_fck"), DT_CLK("omap24xxcam", "ick", "cam_ick"), DT_CLK(NULL, "cam_ick", "cam_ick"), DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"), DT_CLK(NULL, "wdt4_ick", "wdt4_ick"), DT_CLK(NULL, "wdt4_fck", "wdt4_fck"), DT_CLK(NULL, "mspro_ick", "mspro_ick"), DT_CLK(NULL, "mspro_fck", "mspro_fck"), DT_CLK(NULL, "fac_ick", "fac_ick"), DT_CLK(NULL, "fac_fck", "fac_fck"), DT_CLK("omap_hdq.0", "ick", "hdq_ick"), DT_CLK(NULL, "hdq_ick", "hdq_ick"), DT_CLK("omap_hdq.0", "fck", "hdq_fck"), DT_CLK(NULL, "hdq_fck", "hdq_fck"), DT_CLK("omap_i2c.1", "ick", "i2c1_ick"), DT_CLK(NULL, "i2c1_ick", "i2c1_ick"), DT_CLK("omap_i2c.2", "ick", "i2c2_ick"), DT_CLK(NULL, "i2c2_ick", "i2c2_ick"), DT_CLK(NULL, "gpmc_fck", "gpmc_fck"), DT_CLK(NULL, "sdma_fck", "sdma_fck"), DT_CLK(NULL, "sdma_ick", "sdma_ick"), DT_CLK(NULL, "sdrc_ick", "sdrc_ick"), DT_CLK(NULL, "des_ick", "des_ick"), DT_CLK("omap-sham", "ick", "sha_ick"), DT_CLK(NULL, "sha_ick", "sha_ick"), DT_CLK("omap_rng", "ick", "rng_ick"), DT_CLK(NULL, "rng_ick", "rng_ick"), DT_CLK("omap-aes", "ick", "aes_ick"), DT_CLK(NULL, "aes_ick", "aes_ick"), DT_CLK(NULL, "pka_ick", "pka_ick"), DT_CLK(NULL, "usb_fck", "usb_fck"), DT_CLK(NULL, "timer_32k_ck", "func_32k_ck"), DT_CLK(NULL, "timer_sys_ck", "sys_ck"), DT_CLK(NULL, "timer_ext_ck", "alt_ck"), { .node_name = NULL }, }; static struct ti_dt_clk omap2420_clks[] = { DT_CLK(NULL, "sys_clkout2_src", "sys_clkout2_src"), DT_CLK(NULL, "sys_clkout2", "sys_clkout2"), DT_CLK(NULL, "dsp_ick", "dsp_ick"), DT_CLK(NULL, "iva1_ifck", "iva1_ifck"), DT_CLK(NULL, "iva1_mpu_int_ifck", "iva1_mpu_int_ifck"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), 
DT_CLK(NULL, "wdt3_fck", "wdt3_fck"), DT_CLK("mmci-omap.0", "ick", "mmc_ick"), DT_CLK(NULL, "mmc_ick", "mmc_ick"), DT_CLK("mmci-omap.0", "fck", "mmc_fck"), DT_CLK(NULL, "mmc_fck", "mmc_fck"), DT_CLK(NULL, "eac_ick", "eac_ick"), DT_CLK(NULL, "eac_fck", "eac_fck"), DT_CLK(NULL, "i2c1_fck", "i2c1_fck"), DT_CLK(NULL, "i2c2_fck", "i2c2_fck"), DT_CLK(NULL, "vlynq_ick", "vlynq_ick"), DT_CLK(NULL, "vlynq_fck", "vlynq_fck"), DT_CLK("musb-hdrc", "fck", "osc_ck"), { .node_name = NULL }, }; static struct ti_dt_clk omap2430_clks[] = { DT_CLK("twl", "fck", "osc_ck"), DT_CLK(NULL, "iva2_1_ick", "iva2_1_ick"), DT_CLK(NULL, "mdm_ick", "mdm_ick"), DT_CLK(NULL, "mdm_osc_ck", "mdm_osc_ck"), DT_CLK("omap-mcbsp.3", "ick", "mcbsp3_ick"), DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"), DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"), DT_CLK("omap-mcbsp.4", "ick", "mcbsp4_ick"), DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"), DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"), DT_CLK("omap-mcbsp.5", "ick", "mcbsp5_ick"), DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"), DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"), DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"), DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"), DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"), DT_CLK(NULL, "icr_ick", "icr_ick"), DT_CLK(NULL, "i2chs1_fck", "i2chs1_fck"), DT_CLK(NULL, "i2chs2_fck", "i2chs2_fck"), DT_CLK("musb-omap2430", "ick", "usbhs_ick"), DT_CLK(NULL, "usbhs_ick", "usbhs_ick"), DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"), DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"), DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"), DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"), DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"), DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"), DT_CLK(NULL, "gpio5_ick", "gpio5_ick"), DT_CLK(NULL, "gpio5_fck", "gpio5_fck"), DT_CLK(NULL, "mdm_intc_ick", "mdm_intc_ick"), DT_CLK("omap_hsmmc.0", "mmchsdb_fck", "mmchsdb1_fck"), DT_CLK(NULL, "mmchsdb1_fck", "mmchsdb1_fck"), DT_CLK("omap_hsmmc.1", "mmchsdb_fck", "mmchsdb2_fck"), DT_CLK(NULL, "mmchsdb2_fck", "mmchsdb2_fck"), { 
.node_name = NULL }, }; static const char *enable_init_clks[] = { "apll96_ck", "apll54_ck", "sync_32k_ick", "omapctrl_ick", "gpmc_fck", "sdrc_ick", }; enum { OMAP2_SOC_OMAP2420, OMAP2_SOC_OMAP2430, }; static int __init omap2xxx_dt_clk_init(int soc_type) { ti_dt_clocks_register(omap2xxx_clks); if (soc_type == OMAP2_SOC_OMAP2420) ti_dt_clocks_register(omap2420_clks); else ti_dt_clocks_register(omap2430_clks); omap2xxx_clkt_vps_init(); omap2_clk_disable_autoidle_all(); omap2_clk_enable_init_clocks(enable_init_clks, ARRAY_SIZE(enable_init_clks)); pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n", (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 1000000), (clk_get_rate(clk_get_sys(NULL, "sys_ck")) / 100000) % 10, (clk_get_rate(clk_get_sys(NULL, "dpll_ck")) / 1000000), (clk_get_rate(clk_get_sys(NULL, "mpu_ck")) / 1000000)); return 0; } int __init omap2420_dt_clk_init(void) { return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2420); } int __init omap2430_dt_clk_init(void) { return omap2xxx_dt_clk_init(OMAP2_SOC_OMAP2430); }
gpl-2.0
NamelessRom/android_kernel_yu_msm8916
arch/powerpc/kernel/cputable.c
1319
68734
/* * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) * * Modifications for ppc64: * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/string.h> #include <linux/sched.h> #include <linux/threads.h> #include <linux/init.h> #include <linux/export.h> #include <asm/oprofile_impl.h> #include <asm/cputable.h> #include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */ #include <asm/mmu.h> #include <asm/setup.h> struct cpu_spec* cur_cpu_spec = NULL; EXPORT_SYMBOL(cur_cpu_spec); /* The platform string corresponding to the real PVR */ const char *powerpc_base_platform; /* NOTE: * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's * the responsibility of the appropriate CPU save/restore functions to * eventually copy these settings over. Those save/restore aren't yet * part of the cputable though. 
That has to be fixed for both ppc32
 * and ppc64
 */

#ifdef CONFIG_PPC32
/*
 * Per-core low-level setup hooks, referenced by the .cpu_setup fields
 * of the cpu_specs[] entries below; implemented elsewhere (not in this
 * view).
 */
extern void __setup_cpu_e200(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec);
/* __restore_cpu_* are the matching .cpu_restore hooks of cpu_specs[] */
extern void __restore_cpu_pa6t(void);
extern void __restore_cpu_ppc970(void);
extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_e5500(void);
extern void __restore_cpu_e6500(void);
#endif /* CONFIG_E500 */

/* This table only contains "desktop" CPUs, it needs to be filled with embedded
 * ones as well...
 */
/*
 * Shorthands for the user-visible PPC_FEATURE* / PPC_FEATURE2* bit
 * combinations shared by multiple cpu_specs[] entries below.
 */
#define COMMON_USER		(PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
				 PPC_FEATURE_HAS_MMU)
#define COMMON_USER_PPC64	(COMMON_USER | PPC_FEATURE_64)
#define COMMON_USER_POWER4	(COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
#define COMMON_USER_POWER5	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_POWER5_PLUS	(COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_POWER6	(COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
				 PPC_FEATURE_TRUE_LE | \
				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER_POWER7	(COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
				 PPC_FEATURE_TRUE_LE | \
				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER2_POWER7	(PPC_FEATURE2_DSCR)
#define COMMON_USER_POWER8	(COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
				 PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
				 PPC_FEATURE_TRUE_LE | \
				 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
#define COMMON_USER2_POWER8	(PPC_FEATURE2_ARCH_2_07 | \
				 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ PPC_FEATURE2_VEC_CRYPTO) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) #ifdef CONFIG_PPC_BOOK3E_64 #define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) #else #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) #endif static struct cpu_spec __initdata cpu_specs[] = { #ifdef CONFIG_PPC_BOOK3S_64 { /* Power3 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00400000, .cpu_name = "POWER3 (630)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power3", .oprofile_type = PPC_OPROFILE_RS64, .platform = "power3", }, { /* Power3+ */ .pvr_mask = 0xffff0000, .pvr_value = 0x00410000, .cpu_name = "POWER3 (630+)", .cpu_features = CPU_FTRS_POWER3, .cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power3", .oprofile_type = PPC_OPROFILE_RS64, .platform = "power3", }, { /* Northstar */ .pvr_mask = 0xffff0000, .pvr_value = 0x00330000, .cpu_name = "RS64-II (northstar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* Pulsar */ .pvr_mask = 0xffff0000, .pvr_value = 0x00340000, .cpu_name = "RS64-III (pulsar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = 
PPC_OPROFILE_RS64, .platform = "rs64", }, { /* I-star */ .pvr_mask = 0xffff0000, .pvr_value = 0x00360000, .cpu_name = "RS64-III (icestar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* S-star */ .pvr_mask = 0xffff0000, .pvr_value = 0x00370000, .cpu_name = "RS64-IV (sstar)", .cpu_features = CPU_FTRS_RS64, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* Power4 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00350000, .cpu_name = "POWER4 (gp)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power4", }, { /* Power4+ */ .pvr_mask = 0xffff0000, .pvr_value = 0x00380000, .cpu_name = "POWER4+ (gq)", .cpu_features = CPU_FTRS_POWER4, .cpu_user_features = COMMON_USER_POWER4, .mmu_features = MMU_FTRS_POWER4, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power4", }, { /* PPC970 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00390000, .cpu_name = "PPC970", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = 
PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970FX */ .pvr_mask = 0xffff0000, .pvr_value = 0x003c0000, .cpu_name = "PPC970FX", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ .pvr_mask = 0xffffffff, .pvr_value = 0x00440100, .cpu_name = "PPC970MP", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970MP", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970MP */ .pvr_mask = 0xffff0000, .pvr_value = 0x00440000, .cpu_name = "PPC970MP", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970MP, .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970MP", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* PPC970GX */ .pvr_mask = 0xffff0000, .pvr_value = 0x00450000, .cpu_name = "PPC970GX", .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, .mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* Power5 GR */ .pvr_mask = 
0xffff0000, .pvr_value = 0x003a0000, .cpu_name = "POWER5 (gr)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power5", .oprofile_type = PPC_OPROFILE_POWER4, /* SIHV / SIPR bits are implemented on POWER4+ (GQ) * and above but only works on POWER5 and above */ .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5", }, { /* Power5++ */ .pvr_mask = 0xffffff00, .pvr_value = 0x003b0300, .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .oprofile_cpu_type = "ppc64/power5++", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5+", }, { /* Power5 GS */ .pvr_mask = 0xffff0000, .pvr_value = 0x003b0000, .cpu_name = "POWER5+ (gs)", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power5+", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = MMCRA_SIHV, .oprofile_mmcra_sipr = MMCRA_SIPR, .platform = "power5+", }, { /* POWER6 in P5+ mode; 2.04-compliant processor */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000001, .cpu_name = "POWER5+", .cpu_features = CPU_FTRS_POWER5, .cpu_user_features = COMMON_USER_POWER5_PLUS, .mmu_features = MMU_FTRS_POWER5, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power5+", }, { /* Power6 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003e0000, .cpu_name = "POWER6 (raw)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, 
.mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power6", .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, .oprofile_mmcra_clear = POWER6_MMCRA_THRM | POWER6_MMCRA_OTHER, .platform = "power6x", }, { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000002, .cpu_name = "POWER6 (architected)", .cpu_features = CPU_FTRS_POWER6, .cpu_user_features = COMMON_USER_POWER6, .mmu_features = MMU_FTRS_POWER6, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power6", }, { /* 2.06-compliant processor, i.e. Power7 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000003, .cpu_name = "POWER7 (architected)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_type = PPC_OPROFILE_POWER4, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .platform = "power7", }, { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ .pvr_mask = 0xffffffff, .pvr_value = 0x0f000004, .cpu_name = "POWER8 (architected)", .cpu_features = CPU_FTRS_POWER8, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .oprofile_type = PPC_OPROFILE_INVALID, .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .platform = "power8", }, { /* Power7 */ .pvr_mask = 0xffff0000, .pvr_value = 0x003f0000, .cpu_name = "POWER7 (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .platform = "power7", }, { /* Power7+ */ .pvr_mask = 0xffff0000, .pvr_value = 0x004A0000, .cpu_name = "POWER7+ (raw)", .cpu_features = CPU_FTRS_POWER7, .cpu_user_features = COMMON_USER_POWER7, .cpu_user_features2 = COMMON_USER2_POWER7, .mmu_features = MMU_FTRS_POWER7, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power7", .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, .platform = "power7+", }, { /* Power8 */ .pvr_mask = 0xffff0000, .pvr_value = 0x004b0000, .cpu_name = "POWER8 (raw)", .cpu_features = CPU_FTRS_POWER8, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/power8", .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, .platform = "power8", }, { /* Cell Broadband Engine */ 
.pvr_mask = 0xffff0000, .pvr_value = 0x00700000, .cpu_name = "Cell Broadband Engine", .cpu_features = CPU_FTRS_CELL, .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, .mmu_features = MMU_FTRS_CELL, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .oprofile_cpu_type = "ppc64/cell-be", .oprofile_type = PPC_OPROFILE_CELL, .platform = "ppc-cell-be", }, { /* PA Semi PA6T */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00900000, .cpu_name = "PA6T", .cpu_features = CPU_FTRS_PA6T, .cpu_user_features = COMMON_USER_PA6T, .mmu_features = MMU_FTRS_PA6T, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, .pmc_type = PPC_PMC_PA6T, .cpu_setup = __setup_cpu_pa6t, .cpu_restore = __restore_cpu_pa6t, .oprofile_cpu_type = "ppc64/pa6t", .oprofile_type = PPC_OPROFILE_PA6T, .platform = "pa6t", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "POWER4 (compatible)", .cpu_features = CPU_FTRS_COMPATIBLE, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 6, .pmc_type = PPC_PMC_IBM, .platform = "power4", } #endif /* CONFIG_PPC_BOOK3S_64 */ #ifdef CONFIG_PPC32 #if CLASSIC_PPC { /* 601 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00010000, .cpu_name = "601", .cpu_features = CPU_FTRS_PPC601, .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR | PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, .platform = "ppc601", }, { /* 603 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00030000, .cpu_name = "603", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 603e */ .pvr_mask = 0xffff0000, 
.pvr_value = 0x00060000, .cpu_name = "603e", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 603ev */ .pvr_mask = 0xffff0000, .pvr_value = 0x00070000, .cpu_name = "603ev", .cpu_features = CPU_FTRS_603, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* 604 */ .pvr_mask = 0xffff0000, .pvr_value = 0x00040000, .cpu_name = "604", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 2, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604e */ .pvr_mask = 0xfffff000, .pvr_value = 0x00090000, .cpu_name = "604e", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604r */ .pvr_mask = 0xffff0000, .pvr_value = 0x00090000, .cpu_name = "604r", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, .platform = "ppc604", }, { /* 604ev */ .pvr_mask = 0xffff0000, .pvr_value = 0x000a0000, .cpu_name = "604ev", .cpu_features = CPU_FTRS_604, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_604, .machine_check = machine_check_generic, 
.platform = "ppc604", }, { /* 740/750 (0x4202, don't support TAU ?) */ .pvr_mask = 0xffffffff, .pvr_value = 0x00084202, .cpu_name = "740/750", .cpu_features = CPU_FTRS_740_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CX (80100 and 8010x?) */ .pvr_mask = 0xfffffff0, .pvr_value = 0x00080100, .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CX (82201 and 82202) */ .pvr_mask = 0xfffffff0, .pvr_value = 0x00082200, .cpu_name = "750CX", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CXe (82214) */ .pvr_mask = 0xfffffff0, .pvr_value = 0x00082210, .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CXe "Gekko" (83214) */ .pvr_mask = 0xffffffff, .pvr_value = 0x00083214, .cpu_name = "750CXe", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750cx, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750CL 
(and "Broadway") */ .pvr_mask = 0xfffff0e0, .pvr_value = 0x00087000, .cpu_name = "750CL", .cpu_features = CPU_FTRS_750CL, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 745/755 */ .pvr_mask = 0xfffff000, .pvr_value = 0x00083000, .cpu_name = "745/755", .cpu_features = CPU_FTRS_750, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 750FX rev 1.x */ .pvr_mask = 0xffffff00, .pvr_value = 0x70000100, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX1, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750FX rev 2.0 must disable HID0[DPM] */ .pvr_mask = 0xffffffff, .pvr_value = 0x70000200, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX2, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750FX (All revs except 2.0) */ .pvr_mask = 0xffff0000, .pvr_value = 0x70000000, .cpu_name = "750FX", .cpu_features = CPU_FTRS_750FX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, 
.mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750fx, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 750GX */ .pvr_mask = 0xffff0000, .pvr_value = 0x70020000, .cpu_name = "750GX", .cpu_features = CPU_FTRS_750GX, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750fx, .machine_check = machine_check_generic, .platform = "ppc750", .oprofile_cpu_type = "ppc/750", .oprofile_type = PPC_OPROFILE_G4, }, { /* 740/750 (L2CR bit need fixup for 740) */ .pvr_mask = 0xffff0000, .pvr_value = 0x00080000, .cpu_name = "740/750", .cpu_features = CPU_FTRS_740, .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_IBM, .cpu_setup = __setup_cpu_750, .machine_check = machine_check_generic, .platform = "ppc750", }, { /* 7400 rev 1.1 ? 
(no TAU) */ .pvr_mask = 0xffffffff, .pvr_value = 0x000c1101, .cpu_name = "7400 (1.1)", .cpu_features = CPU_FTRS_7400_NOTAU, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7400, .machine_check = machine_check_generic, .platform = "ppc7400", }, { /* 7400 */ .pvr_mask = 0xffff0000, .pvr_value = 0x000c0000, .cpu_name = "7400", .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7400, .machine_check = machine_check_generic, .platform = "ppc7400", }, { /* 7410 */ .pvr_mask = 0xffff0000, .pvr_value = 0x800c0000, .cpu_name = "7410", .cpu_features = CPU_FTRS_7400, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_7410, .machine_check = machine_check_generic, .platform = "ppc7400", }, { /* 7450 2.0 - no doze/nap */ .pvr_mask = 0xffffffff, .pvr_value = 0x80000200, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7450 2.1 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80000201, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_21, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize 
= 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7450 2.3 and newer */ .pvr_mask = 0xffff0000, .pvr_value = 0x80000000, .cpu_name = "7450", .cpu_features = CPU_FTRS_7450_23, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7455 rev 1.x */ .pvr_mask = 0xffffff00, .pvr_value = 0x80010100, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455_1, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7455 rev 2.0 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80010200, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455_20, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7455 others */ .pvr_mask = 0xffff0000, .pvr_value = 0x80010000, .cpu_name = "7455", .cpu_features = CPU_FTRS_7455, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | 
MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.0 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80020100, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.1 */ .pvr_mask = 0xffffffff, .pvr_value = 0x80020101, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447_10, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.2 and later */ .pvr_mask = 0xffff0000, .pvr_value = 0x80020000, .cpu_name = "7447/7457", .cpu_features = CPU_FTRS_7447, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7447A */ .pvr_mask = 0xffff0000, .pvr_value = 0x80030000, .cpu_name = "7447A", .cpu_features = CPU_FTRS_7447A, .cpu_user_features = COMMON_USER | 
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 7448 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80040000, .cpu_name = "7448", .cpu_features = CPU_FTRS_7448, .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 6, .pmc_type = PPC_PMC_G4, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = PPC_OPROFILE_G4, .machine_check = machine_check_generic, .platform = "ppc7450", }, { /* 82xx (8240, 8245, 8260 are all 603e cores) */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00810000, .cpu_name = "82xx", .cpu_features = CPU_FTRS_82XX, .cpu_user_features = COMMON_USER, .mmu_features = 0, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* All G2_LE (603e core, plus some) have the same pvr */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00820000, .cpu_name = "G2_LE", .cpu_features = CPU_FTRS_G2_LE, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c1 (a 603e core, plus some) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00830000, .cpu_name = "e300c1", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ .pvr_mask = 
0x7fff0000, .pvr_value = 0x00840000, .cpu_name = "e300c2", .cpu_features = CPU_FTRS_E300C2, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .platform = "ppc603", }, { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00850000, .cpu_name = "e300c3", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, { /* e300c4 (e300c1, plus one IU) */ .pvr_mask = 0x7fff0000, .pvr_value = 0x00860000, .cpu_name = "e300c4", .cpu_features = CPU_FTRS_E300, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_603, .machine_check = machine_check_generic, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e300", .oprofile_type = PPC_OPROFILE_FSL_EMB, .platform = "ppc603", }, { /* default match, we assume split I/D cache & TB (non-601)... */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic PPC)", .cpu_features = CPU_FTRS_CLASSIC32, .cpu_user_features = COMMON_USER, .mmu_features = MMU_FTR_HPTE_TABLE, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_generic, .platform = "ppc603", }, #endif /* CLASSIC_PPC */ #ifdef CONFIG_8xx { /* 8xx */ .pvr_mask = 0xffff0000, .pvr_value = 0x00500000, .cpu_name = "8xx", /* CPU_FTR_MAYBE_CAN_DOZE is possible, * if the 8xx code is there.... 
*/ .cpu_features = CPU_FTRS_8XX, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_8xx, .icache_bsize = 16, .dcache_bsize = 16, .platform = "ppc823", }, #endif /* CONFIG_8xx */ #ifdef CONFIG_40x { /* 403GC */ .pvr_mask = 0xffffff00, .pvr_value = 0x00200200, .cpu_name = "403GC", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 403GCX */ .pvr_mask = 0xffffff00, .pvr_value = 0x00201400, .cpu_name = "403GCX", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 403G ?? */ .pvr_mask = 0xffff0000, .pvr_value = 0x00200000, .cpu_name = "403G ??", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 16, .dcache_bsize = 16, .machine_check = machine_check_4xx, .platform = "ppc403", }, { /* 405GP */ .pvr_mask = 0xffff0000, .pvr_value = 0x40110000, .cpu_name = "405GP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STB 03xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x40130000, .cpu_name = "STB03xxx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STB 04xxx */ .pvr_mask = 0xffff0000, .pvr_value = 0x41810000, .cpu_name = "STB04xxx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = 
PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP405L */ .pvr_mask = 0xffff0000, .pvr_value = 0x41610000, .cpu_name = "NP405L", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP4GS3 */ .pvr_mask = 0xffff0000, .pvr_value = 0x40B10000, .cpu_name = "NP4GS3", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* NP405H */ .pvr_mask = 0xffff0000, .pvr_value = 0x41410000, .cpu_name = "NP405H", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405GPr */ .pvr_mask = 0xffff0000, .pvr_value = 0x50910000, .cpu_name = "405GPr", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* STBx25xx */ .pvr_mask = 0xffff0000, .pvr_value = 0x51510000, .cpu_name = "STBx25xx", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405LP */ .pvr_mask = 0xffff0000, .pvr_value = 0x41F10000, .cpu_name = "405LP", .cpu_features = CPU_FTRS_40X, 
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* Xilinx Virtex-II Pro */ .pvr_mask = 0xfffff000, .pvr_value = 0x20010000, .cpu_name = "Virtex-II Pro", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* Xilinx Virtex-4 FX */ .pvr_mask = 0xfffff000, .pvr_value = 0x20011000, .cpu_name = "Virtex-4 FX", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EP */ .pvr_mask = 0xffff0000, .pvr_value = 0x51210000, .cpu_name = "405EP", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. A/B with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910007, .cpu_name = "405EX Rev. A/B", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. C without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000d, .cpu_name = "405EX Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. 
C with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000f, .cpu_name = "405EX Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. D without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910003, .cpu_name = "405EX Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EX Rev. D with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910005, .cpu_name = "405EX Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. A/B without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910001, .cpu_name = "405EXr Rev. A/B", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. C without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910009, .cpu_name = "405EXr Rev. C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. C with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x1291000b, .cpu_name = "405EXr Rev. 
C", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. D without Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910000, .cpu_name = "405EXr Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EXr Rev. D with Security */ .pvr_mask = 0xffff000f, .pvr_value = 0x12910002, .cpu_name = "405EXr Rev. D", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* 405EZ */ .pvr_mask = 0xffff0000, .pvr_value = 0x41510000, .cpu_name = "405EZ", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* APM8018X */ .pvr_mask = 0xffff0000, .pvr_value = 0x7ff11432, .cpu_name = "APM8018X", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc405", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic 40x PPC)", .cpu_features = CPU_FTRS_40X, .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .mmu_features = MMU_FTR_TYPE_40x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = 
"ppc405", } #endif /* CONFIG_40x */ #ifdef CONFIG_44x { .pvr_mask = 0xf0000fff, .pvr_value = 0x40000850, .cpu_name = "440GR Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000858, .cpu_name = "440EP Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { .pvr_mask = 0xf0000fff, .pvr_value = 0x400008d3, .cpu_name = "440GR Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000ff7, .pvr_value = 0x400008d4, .cpu_name = "440EP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000fff, .pvr_value = 0x400008db, .cpu_name = "440EP Rev. 
B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440ep, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* 440GRX */ .pvr_mask = 0xf0000ffb, .pvr_value = 0x200008D0, .cpu_name = "440GRX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440grx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ .pvr_mask = 0xf0000ffb, .pvr_value = 0x200008D8, .cpu_name = "440EPX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440epx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GP Rev. B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000440, .cpu_name = "440GP Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440gp", }, { /* 440GP Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000481, .cpu_name = "440GP Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440gp", }, { /* 440GX Rev. A */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000850, .cpu_name = "440GX Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. 
B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000851, .cpu_name = "440GX Rev. B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000892, .cpu_name = "440GX Rev. C", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440GX Rev. F */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000894, .cpu_name = "440GX Rev. F", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440gx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440SP Rev. A */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53200891, .cpu_name = "440SP Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", }, { /* 440SPe Rev. A */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53400890, .cpu_name = "440SPe Rev. A", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440SPe Rev. B */ .pvr_mask = 0xfff00fff, .pvr_value = 0x53400891, .cpu_name = "440SPe Rev. 
B", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440spe, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 440 in Xilinx Virtex-5 FXT */ .pvr_mask = 0xfffffff0, .pvr_value = 0x7ff21910, .cpu_name = "440 in Virtex-5 FXT", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_440x5, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460EX */ .pvr_mask = 0xffff0006, .pvr_value = 0x13020002, .cpu_name = "460EX", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460EX Rev B */ .pvr_mask = 0xffff0007, .pvr_value = 0x13020004, .cpu_name = "460EX Rev. B", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460ex, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460GT */ .pvr_mask = 0xffff0006, .pvr_value = 0x13020000, .cpu_name = "460GT", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460GT Rev B */ .pvr_mask = 0xffff0007, .pvr_value = 0x13020005, .cpu_name = "460GT Rev. 
B", .cpu_features = CPU_FTRS_440x6, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460gt, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 460SX */ .pvr_mask = 0xffffff00, .pvr_value = 0x13541800, .cpu_name = "460SX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_460sx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 464 in APM821xx */ .pvr_mask = 0xfffffff0, .pvr_value = 0x12C41C80, .cpu_name = "APM821XX", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_apm821xx, .machine_check = machine_check_440A, .platform = "ppc440", }, { /* 476 DD2 core */ .pvr_mask = 0xffffffff, .pvr_value = 0x11a52080, .cpu_name = "476", .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476fpe */ .pvr_mask = 0xffff0000, .pvr_value = 0x7ff50000, .cpu_name = "476fpe", .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476 iss */ .pvr_mask = 0xffff0000, .pvr_value = 0x00050000, .cpu_name = "476", .cpu_features = CPU_FTRS_47X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, 
.icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* 476 others */ .pvr_mask = 0xffff0000, .pvr_value = 0x11a50000, .cpu_name = "476", .cpu_features = CPU_FTRS_47X, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 32, .dcache_bsize = 128, .machine_check = machine_check_47x, .platform = "ppc470", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic 44x PPC)", .cpu_features = CPU_FTRS_44X, .cpu_user_features = COMMON_USER_BOOKE, .mmu_features = MMU_FTR_TYPE_44x, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_4xx, .platform = "ppc440", } #endif /* CONFIG_44x */ #ifdef CONFIG_E200 { /* e200z5 */ .pvr_mask = 0xfff00000, .pvr_value = 0x81000000, .cpu_name = "e200z5", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", }, { /* e200z6 */ .pvr_mask = 0xfff00000, .pvr_value = 0x81100000, .cpu_name = "e200z6", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .machine_check = machine_check_e200, .platform = "ppc5554", }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic E200 PPC)", .cpu_features = CPU_FTRS_E200, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .mmu_features = MMU_FTR_TYPE_FSL_E, .dcache_bsize = 32, .cpu_setup = __setup_cpu_e200, .machine_check = machine_check_e200, .platform = "ppc5554", } 
#endif /* CONFIG_E200 */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_E500 #ifdef CONFIG_PPC32 { /* e500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80200000, .cpu_name = "e500", .cpu_features = CPU_FTRS_E500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500v1, .machine_check = machine_check_e500, .platform = "ppc8540", }, { /* e500v2 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80210000, .cpu_name = "e500v2", .cpu_features = CPU_FTRS_E500_2, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP | PPC_FEATURE_HAS_EFP_DOUBLE_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500v2, .machine_check = machine_check_e500, .platform = "ppc8548", }, { /* e500mc */ .pvr_mask = 0xffff0000, .pvr_value = 0x80230000, .cpu_name = "e500mc", .cpu_features = CPU_FTRS_E500MC, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500mc", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e500mc, .machine_check = machine_check_e500mc, .platform = "ppce500mc", }, #endif /* CONFIG_PPC32 */ { /* e5500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, .cpu_name = "e5500", .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | 
MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500mc", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e5500, #ifndef CONFIG_PPC32 .cpu_restore = __restore_cpu_e5500, #endif .machine_check = machine_check_e500mc, .platform = "ppce5500", }, { /* e6500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80400000, .cpu_name = "e6500", .cpu_features = CPU_FTRS_E6500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_ALTIVEC_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e6500", .oprofile_type = PPC_OPROFILE_FSL_EMB, .cpu_setup = __setup_cpu_e6500, #ifndef CONFIG_PPC32 .cpu_restore = __restore_cpu_e6500, #endif .machine_check = machine_check_e500mc, .platform = "ppce6500", }, #ifdef CONFIG_PPC32 { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "(generic E500 PPC)", .cpu_features = CPU_FTRS_E500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE_COMP, .mmu_features = MMU_FTR_TYPE_FSL_E, .icache_bsize = 32, .dcache_bsize = 32, .machine_check = machine_check_e500, .platform = "powerpc", } #endif /* CONFIG_PPC32 */ #endif /* CONFIG_E500 */ #ifdef CONFIG_PPC_A2 { /* Standard A2 (>= DD2) + FPU core */ .pvr_mask = 0xffff0000, .pvr_value = 0x00480000, .cpu_name = "A2 (>= DD2)", .cpu_features = CPU_FTRS_A2, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTRS_A2, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 0, .cpu_setup = __setup_cpu_a2, .cpu_restore = __restore_cpu_a2, .machine_check = machine_check_generic, .platform = "ppca2", }, { /* This is a default entry to get going, to be replaced by * a real one at some stage */ #define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \ 
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) .pvr_mask = 0x00000000, .pvr_value = 0x00000000, .cpu_name = "Book3E", .cpu_features = CPU_FTRS_BASE_BOOK3E, .cpu_user_features = COMMON_USER_PPC64, .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 0, .machine_check = machine_check_generic, .platform = "power6", }, #endif /* CONFIG_PPC_A2 */ }; static struct cpu_spec the_cpu_spec; static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) { struct cpu_spec *t = &the_cpu_spec; struct cpu_spec old; t = PTRRELOC(t); old = *t; /* Copy everything, then do fixups */ *t = *s; /* * If we are overriding a previous value derived from the real * PVR with a new value obtained using a logical PVR value, * don't modify the performance monitor fields. */ if (old.num_pmcs && !s->num_pmcs) { t->num_pmcs = old.num_pmcs; t->pmc_type = old.pmc_type; t->oprofile_type = old.oprofile_type; t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv; t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr; t->oprofile_mmcra_clear = old.oprofile_mmcra_clear; /* * If we have passed through this logic once before and * have pulled the default case because the real PVR was * not found inside cpu_specs[], then we are possibly * running in compatibility mode. In that case, let the * oprofiler know which set of compatibility counters to * pull from by making sure the oprofile_cpu_type string * is set to that of compatibility mode. If the * oprofile_cpu_type already has a value, then we are * possibly overriding a real PVR with a logical one, * and, in that case, keep the current value for * oprofile_cpu_type. */ if (old.oprofile_cpu_type != NULL) { t->oprofile_cpu_type = old.oprofile_cpu_type; t->oprofile_type = old.oprofile_type; } } *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec; /* * Set the base platform string once; assumes * we're called with real pvr first. 
*/ if (*PTRRELOC(&powerpc_base_platform) == NULL) *PTRRELOC(&powerpc_base_platform) = t->platform; #if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE) /* ppc64 and booke expect identify_cpu to also call setup_cpu for * that processor. I will consolidate that at a later time, for now, * just use #ifdef. We also don't need to PTRRELOC the function * pointer on ppc64 and booke as we are running at 0 in real mode * on ppc64 and reloc_offset is always 0 on booke. */ if (t->cpu_setup) { t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ return t; } struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) { struct cpu_spec *s = cpu_specs; int i; s = PTRRELOC(s); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { if ((pvr & s->pvr_mask) == s->pvr_value) return setup_cpu_spec(offset, s); } BUG(); return NULL; }
gpl-2.0
TeamExodus/kernel_oneplus_msm8994
sound/soc/samsung/neo1973_wm8753.c
2087
12031
/* * neo1973_wm8753.c -- SoC audio for Openmoko Neo1973 and Freerunner devices * * Copyright 2007 Openmoko Inc * Author: Graeme Gregory <graeme@openmoko.org> * Copyright 2007 Wolfson Microelectronics PLC. * Author: Graeme Gregory * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com * Copyright 2009 Wolfson Microelectronics * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "regs-iis.h" #include "../codecs/wm8753.h" #include "s3c24xx-i2s.h" static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int pll_out = 0, bclk = 0; int ret = 0; unsigned long iis_clkrate; iis_clkrate = s3c24xx_i2s_get_clockrate(); switch (params_rate(params)) { case 8000: case 16000: pll_out = 12288000; break; case 48000: bclk = WM8753_BCLK_DIV_4; pll_out = 12288000; break; case 96000: bclk = WM8753_BCLK_DIV_2; pll_out = 12288000; break; case 11025: bclk = WM8753_BCLK_DIV_16; pll_out = 11289600; break; case 22050: bclk = WM8753_BCLK_DIV_8; pll_out = 11289600; break; case 44100: bclk = WM8753_BCLK_DIV_4; pll_out = 11289600; break; case 88200: bclk = WM8753_BCLK_DIV_2; pll_out = 11289600; break; } /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM); if (ret < 0) return ret; /* set the codec system 
clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set MCLK division for sample rate */ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, S3C2410_IISMOD_32FS); if (ret < 0) return ret; /* set codec BCLK division for sample rate */ ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_BCLKDIV, bclk); if (ret < 0) return ret; /* set prescaler division for sample rate */ ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER, S3C24XX_PRESCALE(4, 4)); if (ret < 0) return ret; /* codec PLL input is PCLK/4 */ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, iis_clkrate / 4, pll_out); if (ret < 0) return ret; return 0; } static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; /* disable the PLL */ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0); } /* * Neo1973 WM8753 HiFi DAI opserations. 
*/ static struct snd_soc_ops neo1973_hifi_ops = { .hw_params = neo1973_hifi_hw_params, .hw_free = neo1973_hifi_hw_free, }; static int neo1973_voice_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; unsigned int pcmdiv = 0; int ret = 0; unsigned long iis_clkrate; iis_clkrate = s3c24xx_i2s_get_clockrate(); if (params_rate(params) != 8000) return -EINVAL; if (params_channels(params) != 1) return -EINVAL; pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */ /* todo: gg check mode (DSP_B) against CSR datasheet */ /* set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000, SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set codec PCM division for sample rate */ ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV, pcmdiv); if (ret < 0) return ret; /* configure and enable PLL for 12.288MHz output */ ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, iis_clkrate / 4, 12288000); if (ret < 0) return ret; return 0; } static int neo1973_voice_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; /* disable the PLL */ return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0); } static struct snd_soc_ops neo1973_voice_ops = { .hw_params = neo1973_voice_hw_params, .hw_free = neo1973_voice_hw_free, }; /* Shared routes and controls */ static const struct snd_soc_dapm_widget neo1973_wm8753_dapm_widgets[] = { SND_SOC_DAPM_LINE("GSM Line Out", NULL), SND_SOC_DAPM_LINE("GSM Line In", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Handset Mic", NULL), }; static const struct snd_soc_dapm_route neo1973_wm8753_routes[] = { /* 
Connections to the GSM Module */ {"GSM Line Out", NULL, "MONO1"}, {"GSM Line Out", NULL, "MONO2"}, {"RXP", NULL, "GSM Line In"}, {"RXN", NULL, "GSM Line In"}, /* Connections to Headset */ {"MIC1", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Headset Mic"}, /* Call Mic */ {"MIC2", NULL, "Mic Bias"}, {"MIC2N", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Handset Mic"}, /* Connect the ALC pins */ {"ACIN", NULL, "ACOP"}, }; static const struct snd_kcontrol_new neo1973_wm8753_controls[] = { SOC_DAPM_PIN_SWITCH("GSM Line Out"), SOC_DAPM_PIN_SWITCH("GSM Line In"), SOC_DAPM_PIN_SWITCH("Headset Mic"), SOC_DAPM_PIN_SWITCH("Handset Mic"), }; /* GTA02 specific routes and controls */ static int gta02_speaker_enabled; static int lm4853_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { gta02_speaker_enabled = ucontrol->value.integer.value[0]; gpio_set_value(S3C2410_GPJ(2), !gta02_speaker_enabled); return 0; } static int lm4853_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = gta02_speaker_enabled; return 0; } static int lm4853_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value(S3C2410_GPJ(1), SND_SOC_DAPM_EVENT_OFF(event)); return 0; } static const struct snd_soc_dapm_route neo1973_gta02_routes[] = { /* Connections to the amp */ {"Stereo Out", NULL, "LOUT1"}, {"Stereo Out", NULL, "ROUT1"}, /* Call Speaker */ {"Handset Spk", NULL, "LOUT2"}, {"Handset Spk", NULL, "ROUT2"}, }; static const struct snd_kcontrol_new neo1973_gta02_wm8753_controls[] = { SOC_DAPM_PIN_SWITCH("Handset Spk"), SOC_DAPM_PIN_SWITCH("Stereo Out"), SOC_SINGLE_BOOL_EXT("Amp Spk Switch", 0, lm4853_get_spk, lm4853_set_spk), }; static const struct snd_soc_dapm_widget neo1973_gta02_wm8753_dapm_widgets[] = { SND_SOC_DAPM_SPK("Handset Spk", NULL), SND_SOC_DAPM_SPK("Stereo Out", lm4853_event), }; static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = 
&codec->dapm; int ret; ret = snd_soc_dapm_new_controls(dapm, neo1973_gta02_wm8753_dapm_widgets, ARRAY_SIZE(neo1973_gta02_wm8753_dapm_widgets)); if (ret) return ret; ret = snd_soc_dapm_add_routes(dapm, neo1973_gta02_routes, ARRAY_SIZE(neo1973_gta02_routes)); if (ret) return ret; ret = snd_soc_add_card_controls(codec->card, neo1973_gta02_wm8753_controls, ARRAY_SIZE(neo1973_gta02_wm8753_controls)); if (ret) return ret; snd_soc_dapm_disable_pin(dapm, "Stereo Out"); snd_soc_dapm_disable_pin(dapm, "Handset Spk"); snd_soc_dapm_ignore_suspend(dapm, "Stereo Out"); snd_soc_dapm_ignore_suspend(dapm, "Handset Spk"); return 0; } static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* set up NC codec pins */ snd_soc_dapm_nc_pin(dapm, "OUT3"); snd_soc_dapm_nc_pin(dapm, "OUT4"); snd_soc_dapm_nc_pin(dapm, "LINE1"); snd_soc_dapm_nc_pin(dapm, "LINE2"); /* Add neo1973 specific widgets */ ret = snd_soc_dapm_new_controls(dapm, neo1973_wm8753_dapm_widgets, ARRAY_SIZE(neo1973_wm8753_dapm_widgets)); if (ret) return ret; /* add neo1973 specific controls */ ret = snd_soc_add_card_controls(rtd->card, neo1973_wm8753_controls, ARRAY_SIZE(neo1973_wm8753_controls)); if (ret) return ret; /* set up neo1973 specific audio routes */ ret = snd_soc_dapm_add_routes(dapm, neo1973_wm8753_routes, ARRAY_SIZE(neo1973_wm8753_routes)); if (ret) return ret; /* set endpoints to default off mode */ snd_soc_dapm_disable_pin(dapm, "GSM Line Out"); snd_soc_dapm_disable_pin(dapm, "GSM Line In"); snd_soc_dapm_disable_pin(dapm, "Headset Mic"); snd_soc_dapm_disable_pin(dapm, "Handset Mic"); /* allow audio paths from the GSM modem to run during suspend */ snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out"); snd_soc_dapm_ignore_suspend(dapm, "GSM Line In"); snd_soc_dapm_ignore_suspend(dapm, "Headset Mic"); snd_soc_dapm_ignore_suspend(dapm, "Handset Mic"); if (machine_is_neo1973_gta02()) { ret = 
neo1973_gta02_wm8753_init(codec); if (ret) return ret; } return 0; } static struct snd_soc_dai_link neo1973_dai[] = { { /* Hifi Playback - for similatious use with voice below */ .name = "WM8753", .stream_name = "WM8753 HiFi", .platform_name = "s3c24xx-iis", .cpu_dai_name = "s3c24xx-iis", .codec_dai_name = "wm8753-hifi", .codec_name = "wm8753.0-001a", .init = neo1973_wm8753_init, .ops = &neo1973_hifi_ops, }, { /* Voice via BT */ .name = "Bluetooth", .stream_name = "Voice", .cpu_dai_name = "dfbmcs320-pcm", .codec_dai_name = "wm8753-voice", .codec_name = "wm8753.0-001a", .ops = &neo1973_voice_ops, }, }; static struct snd_soc_aux_dev neo1973_aux_devs[] = { { .name = "dfbmcs320", .codec_name = "dfbmcs320.0", }, }; static struct snd_soc_codec_conf neo1973_codec_conf[] = { { .dev_name = "lm4857.0-007c", .name_prefix = "Amp", }, }; static const struct gpio neo1973_gta02_gpios[] = { { S3C2410_GPJ(2), GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" }, { S3C2410_GPJ(1), GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" }, }; static struct snd_soc_card neo1973 = { .name = "neo1973", .owner = THIS_MODULE, .dai_link = neo1973_dai, .num_links = ARRAY_SIZE(neo1973_dai), .aux_dev = neo1973_aux_devs, .num_aux_devs = ARRAY_SIZE(neo1973_aux_devs), .codec_conf = neo1973_codec_conf, .num_configs = ARRAY_SIZE(neo1973_codec_conf), }; static struct platform_device *neo1973_snd_device; static int __init neo1973_init(void) { int ret; if (!machine_is_neo1973_gta02()) return -ENODEV; if (machine_is_neo1973_gta02()) { neo1973.name = "neo1973gta02"; neo1973.num_aux_devs = 1; ret = gpio_request_array(neo1973_gta02_gpios, ARRAY_SIZE(neo1973_gta02_gpios)); if (ret) return ret; } neo1973_snd_device = platform_device_alloc("soc-audio", -1); if (!neo1973_snd_device) { ret = -ENOMEM; goto err_gpio_free; } platform_set_drvdata(neo1973_snd_device, &neo1973); ret = platform_device_add(neo1973_snd_device); if (ret) goto err_put_device; return 0; err_put_device: platform_device_put(neo1973_snd_device); err_gpio_free: if 
(machine_is_neo1973_gta02()) { gpio_free_array(neo1973_gta02_gpios, ARRAY_SIZE(neo1973_gta02_gpios)); } return ret; } module_init(neo1973_init); static void __exit neo1973_exit(void) { platform_device_unregister(neo1973_snd_device); if (machine_is_neo1973_gta02()) { gpio_free_array(neo1973_gta02_gpios, ARRAY_SIZE(neo1973_gta02_gpios)); } } module_exit(neo1973_exit); /* Module information */ MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org"); MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 and Frerunner"); MODULE_LICENSE("GPL");
gpl-2.0
tizbac/mediacomMP82S4
kernel/arch/parisc/kernel/sys_parisc.c
2343
6943
/*
 *    PARISC specific syscalls
 *
 *    Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 *    Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 *    Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 *
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>

/*
 * Find a free area for a private (non-shared) mapping: a simple
 * first-fit walk of the VMA list starting at @addr.
 */
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	addr = PAGE_ALIGN(addr);

	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = vma->vm_end;
	}
}

#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))

/*
 * We need to know the offset to use.  Old scheme was to look for
 * existing mapping and use the same offset.  New scheme is to use the
 * address of the kernel data structure as the seed for the offset.
 * We'll see how that works...
 *
 * The mapping is cacheline aligned, so there's no information in the bottom
 * few bits of the address.  We're looking for 10 bits (4MB / 4k), so let's
 * drop the bottom 8 bits and use bits 8-17.
 */
static int get_offset(struct address_space *mapping)
{
	int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
	return offset & 0x3FF000;
}

/*
 * Find a free area for a shared mapping.  Shared mappings of the same
 * object must be placed at addresses that are congruent modulo SHMLBA
 * so that they alias to the same cache lines on PA-RISC's virtually
 * indexed caches; the per-mapping offset derived above provides that
 * congruence.
 */
static unsigned long get_shared_area(struct address_space *mapping,
		unsigned long addr, unsigned long len, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	int offset = mapping ? get_offset(mapping) : 0;

	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;

	addr = DCACHE_ALIGN(addr - offset) + offset;

	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
		if (addr < vma->vm_end) /* handle wraparound */
			return -ENOMEM;
	}
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	if (len > TASK_SIZE)
		return -ENOMEM;
	/* Might want to check for cache aliasing issues for MAP_FIXED case
	 * like ARM or MIPS ??? --BenH.
	 */
	if (flags & MAP_FIXED)
		return addr;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (filp) {
		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
	} else if(flags & MAP_SHARED) {
		addr = get_shared_area(NULL, addr, len, pgoff);
	} else {
		addr = get_unshared_area(addr, len);
	}
	return addr;
}

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
	   we have. */
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	/* The byte offset must be page aligned for the legacy mmap entry. */
	if (!(offset & ~PAGE_MASK)) {
		return sys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/*
 * The 32-bit ABI passes 64-bit file offsets/lengths as pairs of 32-bit
 * registers, so each of these wrappers reassembles (high << 32 | low)
 * before calling the generic syscall.
 */
#ifdef CONFIG_64BIT

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd,
				unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}

#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}

#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf,
					size_t count, unsigned int high,
					unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
					size_t count, unsigned int high,
					unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
					size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr,
			unsigned long len, int prot, int flag)
{
	return -ENOMEM;
}

asmlinkage int sys_free_hugepages(unsigned long addr)
{
	return -EINVAL;
}

/*
 * Wrapper around sys_personality() that maps PER_LINUX requests from a
 * PER_LINUX32 task back to PER_LINUX32 and reports PER_LINUX32 results
 * as PER_LINUX.
 *
 * Bug fix: the original code compared the full personality value
 * (including flag bits such as ADDR_NO_RANDOMIZE) against PER_LINUX /
 * PER_LINUX32 and then overwrote the whole return value, silently
 * discarding any personality flags.  Mask with personality()/PER_MASK
 * so the flag bits are preserved.
 */
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}
gpl-2.0
786228836/linux
drivers/net/wireless/st/cw1200/hwio.c
2599
7385
/* * Low-level device IO routines for ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> * * Based on: * ST-Ericsson UMAC CW1200 driver, which is * Copyright (c) 2010, ST-Ericsson * Author: Ajitpal Singh <ajitpal.singh@lockless.no> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include "cw1200.h" #include "hwio.h" #include "hwbus.h" /* Sdio addr is 4*spi_addr */ #define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2) #define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \ ((((buf_id) & 0x1F) << 7) \ | (((mpf) & 1) << 6) \ | (((rfu) & 1) << 5) \ | (((reg_id_ofs) & 0x1F) << 0)) #define MAX_RETRY 3 static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, size_t buf_len, int buf_id) { u16 addr_sdio; u32 sdio_reg_addr_17bit; /* Check if buffer is aligned to 4 byte boundary */ if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) { pr_err("buffer is not aligned.\n"); return -EINVAL; } /* Convert to SDIO Register Address */ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv, sdio_reg_addr_17bit, buf, buf_len); } static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, size_t buf_len, int buf_id) { u16 addr_sdio; u32 sdio_reg_addr_17bit; /* Convert to SDIO Register Address */ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv, sdio_reg_addr_17bit, buf, buf_len); } static inline int __cw1200_reg_read_32(struct cw1200_common *priv, u16 addr, u32 *val) { __le32 tmp; int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0); *val = le32_to_cpu(tmp); return i; } 
static inline int __cw1200_reg_write_32(struct cw1200_common *priv, u16 addr, u32 val) { __le32 tmp = cpu_to_le32(val); return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0); } static inline int __cw1200_reg_read_16(struct cw1200_common *priv, u16 addr, u16 *val) { __le16 tmp; int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0); *val = le16_to_cpu(tmp); return i; } static inline int __cw1200_reg_write_16(struct cw1200_common *priv, u16 addr, u16 val) { __le16 tmp = cpu_to_le16(val); return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0); } int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, size_t buf_len) { int ret; priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, size_t buf_len) { int ret; priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len) { int ret, retry = 1; int buf_id_rx = priv->buf_id_rx; priv->hwbus_ops->lock(priv->hwbus_priv); while (retry <= MAX_RETRY) { ret = __cw1200_reg_read(priv, ST90TDS_IN_OUT_QUEUE_REG_ID, buf, buf_len, buf_id_rx + 1); if (!ret) { buf_id_rx = (buf_id_rx + 1) & 3; priv->buf_id_rx = buf_id_rx; break; } else { retry++; mdelay(1); pr_err("error :[%d]\n", ret); } } priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_data_write(struct cw1200_common *priv, const void *buf, size_t buf_len) { int ret, retry = 1; int buf_id_tx = priv->buf_id_tx; priv->hwbus_ops->lock(priv->hwbus_priv); while (retry <= MAX_RETRY) { ret = __cw1200_reg_write(priv, ST90TDS_IN_OUT_QUEUE_REG_ID, buf, buf_len, buf_id_tx); if (!ret) { buf_id_tx = (buf_id_tx + 1) & 31; priv->buf_id_tx = buf_id_tx; break; } else { retry++; mdelay(1); pr_err("error :[%d]\n", ret); } } 
priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, size_t buf_len, u32 prefetch, u16 port_addr) { u32 val32 = 0; int i, ret; if ((buf_len / 2) >= 0x1000) { pr_err("Can't read more than 0xfff words.\n"); return -EINVAL; } priv->hwbus_ops->lock(priv->hwbus_priv); /* Write address */ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); if (ret < 0) { pr_err("Can't write address register.\n"); goto out; } /* Read CONFIG Register Value - We will read 32 bits */ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); goto out; } /* Set PREFETCH bit */ ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32 | prefetch); if (ret < 0) { pr_err("Can't write prefetch bit.\n"); goto out; } /* Check for PRE-FETCH bit to be cleared */ for (i = 0; i < 20; i++) { ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't check prefetch bit.\n"); goto out; } if (!(val32 & prefetch)) break; mdelay(i); } if (val32 & prefetch) { pr_err("Prefetch bit is not cleared.\n"); goto out; } /* Read data port */ ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0); if (ret < 0) { pr_err("Can't read data port.\n"); goto out; } out: priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, size_t buf_len) { int ret; if ((buf_len / 2) >= 0x1000) { pr_err("Can't write more than 0xfff words.\n"); return -EINVAL; } priv->hwbus_ops->lock(priv->hwbus_priv); /* Write address */ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); if (ret < 0) { pr_err("Can't write address register.\n"); goto out; } /* Write data port */ ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID, buf, buf_len, 0); if (ret < 0) { pr_err("Can't write data port.\n"); goto out; } out: priv->hwbus_ops->unlock(priv->hwbus_priv); 
return ret; } int __cw1200_irq_enable(struct cw1200_common *priv, int enable) { u32 val32; u16 val16; int ret; if (HIF_8601_SILICON == priv->hw_type) { ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); return ret; } if (enable) val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE; else val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE; ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32); if (ret < 0) { pr_err("Can't write config register.\n"); return ret; } } else { ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); if (ret < 0) { pr_err("Can't read control register.\n"); return ret; } if (enable) val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE; else val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE; ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16); if (ret < 0) { pr_err("Can't write control register.\n"); return ret; } } return 0; }
gpl-2.0
friedrich420/N4-AEL-KERNEL-LOLLIPOP
drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
2855
33817
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #include <linux/string.h> #include "rtl_core.h" #define RATE_COUNT 12 static u32 rtl8192_rates[] = { 1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000 }; #ifndef ENETDOWN #define ENETDOWN 1 #endif static int r8192_wx_get_freq(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8192_priv *priv = rtllib_priv(dev); return rtllib_wx_get_freq(priv->rtllib, a, wrqu, b); } static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8192_priv *priv = rtllib_priv(dev); return rtllib_wx_get_mode(priv->rtllib, a, wrqu, b); } static int r8192_wx_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); return rtllib_wx_get_rate(priv->rtllib, info, wrqu, extra); } static int r8192_wx_set_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = 
rtllib_wx_set_rate(priv->rtllib, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_set_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_set_rts(priv->rtllib, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); return rtllib_wx_get_rts(priv->rtllib, info, wrqu, extra); } static int r8192_wx_set_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) { RT_TRACE(COMP_ERR, "%s():Hw is Radio Off, we can't set " "Power,return\n", __func__); return 0; } down(&priv->wx_sem); ret = rtllib_wx_set_power(priv->rtllib, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_get_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); return rtllib_wx_get_power(priv->rtllib, info, wrqu, extra); } static int r8192_wx_set_rawtx(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); int ret; if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_set_rawtx(priv->rtllib, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_force_reset(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); down(&priv->wx_sem); RT_TRACE(COMP_DBG, "%s(): force reset ! 
extra is %d\n", __func__, *extra); priv->force_reset = *extra; up(&priv->wx_sem); return 0; } static int r8192_wx_force_mic_error(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; down(&priv->wx_sem); RT_TRACE(COMP_DBG, "%s(): force mic error !\n", __func__); ieee->force_mic_error = true; up(&priv->wx_sem); return 0; } #define MAX_ADHOC_PEER_NUM 64 struct adhoc_peer_entry { unsigned char MacAddr[ETH_ALEN]; unsigned char WirelessMode; unsigned char bCurTxBW40MHz; }; struct adhoc_peers_info { struct adhoc_peer_entry Entry[MAX_ADHOC_PEER_NUM]; unsigned char num; }; static int r8192_wx_get_adhoc_peers(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { return 0; } static int r8191se_wx_get_firm_version(struct net_device *dev, struct iw_request_info *info, struct iw_param *wrqu, char *extra) { return 0; } static int r8192_wx_adapter_power_status(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); struct rtllib_device *ieee = priv->rtllib; down(&priv->wx_sem); RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ? "DC power" : "AC power"); if (*extra || priv->force_lps) { priv->ps_force = false; pPSC->bLeisurePs = true; } else { if (priv->rtllib->state == RTLLIB_LINKED) LeisurePSLeave(dev); priv->ps_force = true; pPSC->bLeisurePs = false; ieee->ps = *extra; } up(&priv->wx_sem); return 0; } static int r8192se_wx_set_radio(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); down(&priv->wx_sem); printk(KERN_INFO "%s(): set radio ! 
extra is %d\n", __func__, *extra); if ((*extra != 0) && (*extra != 1)) { RT_TRACE(COMP_ERR, "%s(): set radio an err value,must 0(radio " "off) or 1(radio on)\n", __func__); up(&priv->wx_sem); return -1; } priv->sw_radio_on = *extra; up(&priv->wx_sem); return 0; } static int r8192se_wx_set_lps_awake_interval(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *) (&(priv->rtllib->PowerSaveControl)); down(&priv->wx_sem); printk(KERN_INFO "%s(): set lps awake interval ! extra is %d\n", __func__, *extra); pPSC->RegMaxLPSAwakeIntvl = *extra; up(&priv->wx_sem); return 0; } static int r8192se_wx_set_force_lps(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); down(&priv->wx_sem); printk(KERN_INFO "%s(): force LPS ! extra is %d (1 is open 0 is " "close)\n", __func__, *extra); priv->force_lps = *extra; up(&priv->wx_sem); return 0; } static int r8192_wx_set_debugflag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); u8 c = *extra; if (priv->bHwRadioOff == true) return 0; printk(KERN_INFO "=====>%s(), *extra:%x, debugflag:%x\n", __func__, *extra, rt_global_debug_component); if (c > 0) rt_global_debug_component |= (1<<c); else rt_global_debug_component &= BIT31; return 0; } static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = netdev_priv_rsl(dev); enum rt_rf_power_state rtState; int ret; if (priv->bHwRadioOff == true) return 0; rtState = priv->rtllib->eRFPowerState; down(&priv->wx_sem); if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR || ieee->bNetPromiscuousMode) { if (priv->rtllib->PowerSaveControl.bInactivePs) { if 
(rtState == eRfOff) { if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) { RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n", __func__); up(&priv->wx_sem); return -1; } else { printk(KERN_INFO "=========>%s(): " "IPSLeave\n", __func__); down(&priv->rtllib->ips_sem); IPSLeave(dev); up(&priv->rtllib->ips_sem); } } } } ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b); up(&priv->wx_sem); return ret; } struct iw_range_with_scan_capa { /* Informative stuff (to choose between different interface) */ __u32 throughput; /* To give an idea... */ /* In theory this value should be the maximum benchmarked * TCP/IP throughput, because with most of these devices the * bit rate is meaningless (overhead an co) to estimate how * fast the connection will go and pick the fastest one. * I suggest people to play with Netperf or any benchmark... */ /* NWID (or domain id) */ __u32 min_nwid; /* Minimal NWID we are able to set */ __u32 max_nwid; /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ __u16 old_num_channels; __u8 old_num_frequency; /* Scan capabilities */ __u8 scan_capa; }; static int rtl8192_wx_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; struct r8192_priv *priv = rtllib_priv(dev); u16 val; int i; wrqu->data.length = sizeof(*range); memset(range, 0, sizeof(*range)); /* ~130 Mb/s real (802.11n) */ range->throughput = 130 * 1000 * 1000; if (priv->rf_set_sens != NULL) { /* signal level threshold range */ range->sensitivity = priv->max_sens; } range->max_qual.qual = 100; range->max_qual.level = 0; range->max_qual.noise = 0; range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */ range->avg_qual.level = 0; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ range->num_bitrates = min(RATE_COUNT, IW_MAX_BITRATES); for (i = 0; i < range->num_bitrates; i++) 
range->bitrate[i] = rtl8192_rates[i]; range->max_rts = DEFAULT_RTS_THRESHOLD; range->min_frag = MIN_FRAG_THRESHOLD; range->max_frag = MAX_FRAG_THRESHOLD; range->min_pmp = 0; range->max_pmp = 5000000; range->min_pmt = 0; range->max_pmt = 65535*1000; range->pmp_flags = IW_POWER_PERIOD; range->pmt_flags = IW_POWER_TIMEOUT; range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R; range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 18; for (i = 0, val = 0; i < 14; i++) { if ((priv->rtllib->active_channel_map)[i+1]) { range->freq[val].i = i + 1; range->freq[val].m = rtllib_wlan_frequencies[i] * 100000; range->freq[val].e = 1; val++; } if (val == IW_MAX_FREQUENCIES) break; } range->num_frequency = val; range->num_channels = val; range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2| IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP; range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE; /* Event capability (kernel + driver) */ return 0; } static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; enum rt_rf_power_state rtState; int ret; if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) { if ((ieee->state >= RTLLIB_ASSOCIATING) && (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED)) return 0; if ((priv->rtllib->state == RTLLIB_LINKED) && (priv->rtllib->CntAfterLink < 2)) return 0; } if (priv->bHwRadioOff == true) { printk(KERN_INFO "================>%s(): hwradio off\n", __func__); return 0; } rtState = priv->rtllib->eRFPowerState; if (!priv->up) return -ENETDOWN; if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true) return -EAGAIN; if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { ieee->current_network.ssid_len = req->essid_len; memcpy(ieee->current_network.ssid, req->essid, req->essid_len); } } down(&priv->wx_sem); 
priv->rtllib->FirstIe_InScan = true; if (priv->rtllib->state != RTLLIB_LINKED) { if (priv->rtllib->PowerSaveControl.bInactivePs) { if (rtState == eRfOff) { if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) { RT_TRACE(COMP_ERR, "%s(): RF is " "OFF.\n", __func__); up(&priv->wx_sem); return -1; } else { RT_TRACE(COMP_PS, "=========>%s(): " "IPSLeave\n", __func__); down(&priv->rtllib->ips_sem); IPSLeave(dev); up(&priv->rtllib->ips_sem); } } } rtllib_stop_scan(priv->rtllib); if (priv->rtllib->LedControlHandler) priv->rtllib->LedControlHandler(dev, LED_CTL_SITE_SURVEY); if (priv->rtllib->eRFPowerState != eRfOff) { priv->rtllib->actscanning = true; if (ieee->ScanOperationBackupHandler) ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP); rtllib_start_scan_syncro(priv->rtllib, 0); if (ieee->ScanOperationBackupHandler) ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE); } ret = 0; } else { priv->rtllib->actscanning = true; ret = rtllib_wx_set_scan(priv->rtllib, a, wrqu, b); } up(&priv->wx_sem); return ret; } static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8192_priv *priv = rtllib_priv(dev); if (!priv->up) return -ENETDOWN; if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_get_scan(priv->rtllib, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8192_wx_set_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8192_priv *priv = rtllib_priv(dev); int ret; if ((rtllib_act_scanning(priv->rtllib, false)) && !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) { ; /* TODO - get rid of if */ } if (priv->bHwRadioOff == true) { printk(KERN_INFO "=========>%s():hw radio off,or Rf state is " "eRfOff, return\n", __func__); return 0; } down(&priv->wx_sem); ret = rtllib_wx_set_essid(priv->rtllib, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8192_wx_get_essid(struct net_device *dev, struct 
iw_request_info *a, union iwreq_data *wrqu, char *b)
{
	/* (continuation of r8192_wx_get_essid — SIOCGIWESSID)
	 * Fetch the current ESSID from the rtllib layer, serialised by
	 * the driver-wide wx semaphore.
	 */
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	down(&priv->wx_sem);
	ret = rtllib_wx_get_essid(priv->rtllib, a, wrqu, b);
	up(&priv->wx_sem);

	return ret;
}

/*
 * SIOCSIWNICKN handler: store a user-supplied nickname in priv->nick.
 * The nickname is informational only; nothing here transmits it.
 */
static int r8192_wx_set_nick(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	/* Reject oversized requests outright... */
	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
		return -E2BIG;
	down(&priv->wx_sem);
	/* ...and additionally clamp to the backing buffer size. */
	wrqu->data.length = min((size_t) wrqu->data.length,
				sizeof(priv->nick));
	/* Zero-fill first so the stored string stays NUL-terminated
	 * whenever length < sizeof(priv->nick). */
	memset(priv->nick, 0, sizeof(priv->nick));
	memcpy(priv->nick, extra, wrqu->data.length);
	up(&priv->wx_sem);
	return 0;
}

/* SIOCGIWNICKN handler: copy the stored nickname back to the caller. */
static int r8192_wx_get_nick(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	down(&priv->wx_sem);
	wrqu->data.length = strlen(priv->nick);
	memcpy(extra, priv->nick, wrqu->data.length);
	wrqu->data.flags = 1; /* active */
	up(&priv->wx_sem);
	return 0;
}

/*
 * SIOCSIWFREQ handler: delegate the channel/frequency change to rtllib.
 * Silently succeeds (returns 0) while the hardware RF kill switch is on.
 */
static int r8192_wx_set_freq(struct net_device *dev,
			     struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->bHwRadioOff == true)
		return 0;

	down(&priv->wx_sem);
	ret = rtllib_wx_set_freq(priv->rtllib, a, wrqu, b);
	up(&priv->wx_sem);
	return ret;
}

/* SIOCGIWNAME handler: pure pass-through to the rtllib layer. */
static int r8192_wx_get_name(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_name(priv->rtllib, info, wrqu, extra);
}

/*
 * SIOCSIWFRAG handler: set the fragmentation threshold.
 * A "disabled" request restores the default; otherwise the value must
 * lie within [MIN_FRAG_THRESHOLD, MAX_FRAG_THRESHOLD] and is forced
 * even (bit 0 cleared) before being stored in priv->rtllib->fts.
 */
static int r8192_wx_set_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->bHwRadioOff == true)
		return 0;

	if (wrqu->frag.disabled)
		priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
			return -EINVAL;

		priv->rtllib->fts = wrqu->frag.value & ~0x1;
	}

	return 0;
}

static int
r8192_wx_get_frag(struct net_device *dev, struct iw_request_info *info,
		  union iwreq_data *wrqu, char *extra)
{
	/* SIOCGIWFRAG handler ("static int" is at the end of the
	 * preceding line): report the fragmentation threshold. */
	struct r8192_priv *priv = rtllib_priv(dev);

	wrqu->frag.value = priv->rtllib->fts;
	wrqu->frag.fixed = 0;	/* no auto select */
	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);

	return 0;
}

/*
 * SIOCSIWAP handler: set the BSSID of the AP to associate with,
 * delegated to rtllib under the wx semaphore.
 */
static int r8192_wx_set_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *awrq, char *extra)
{
	int ret;
	struct r8192_priv *priv = rtllib_priv(dev);

	if ((rtllib_act_scanning(priv->rtllib, false)) &&
	    !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN)) {
		; /* TODO - get rid of if */
	}

	if (priv->bHwRadioOff == true)
		return 0;

	down(&priv->wx_sem);
	ret = rtllib_wx_set_wap(priv->rtllib, info, awrq, extra);
	up(&priv->wx_sem);

	return ret;
}

/* SIOCGIWAP handler: pass-through to rtllib. */
static int r8192_wx_get_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_wap(priv->rtllib, info, wrqu, extra);
}

/* SIOCGIWENCODE handler: pass-through to rtllib. */
static int r8192_wx_get_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	return rtllib_wx_get_encode(priv->rtllib, info, wrqu, key);
}

/*
 * SIOCSIWENCODE handler (this line holds only the opening portion; the
 * body continues below): program a WEP key into the software crypt
 * layer and — for WEP104 — the hardware CAM as well.
 */
static int r8192_wx_set_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	struct rtllib_device *ieee = priv->rtllib;
	u32 hwkey[4] = {0, 0, 0, 0};
	u8 mask = 0xff;
	u32 key_idx = 0;
	/* Dummy per-index MAC addresses used as CAM entry addresses for
	 * the four default WEP keys. */
	u8 zero_addr[4][6] = {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
	int i;

	if ((rtllib_act_scanning(priv->rtllib, false)) &&
	    !(priv->rtllib->softmac_features & IEEE_SOFTMAC_SCAN))
		; /* TODO - get rid of if */

	if (priv->bHwRadioOff == true)
		return 0;

	if (!priv->up)
		return -ENETDOWN;

	priv->rtllib->wx_set_enc = 1;
	/* Leave inactive power save before touching key hardware. */
	down(&priv->rtllib->ips_sem);
	IPSLeave(dev);
up(&priv->rtllib->ips_sem); down(&priv->wx_sem); RT_TRACE(COMP_SEC, "Setting SW wep key"); ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key); up(&priv->wx_sem); if (wrqu->encoding.flags & IW_ENCODE_DISABLED) { ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA; CamResetAllEntry(dev); memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32); goto end_hw_sec; } if (wrqu->encoding.length != 0) { for (i = 0; i < 4; i++) { hwkey[i] |= key[4*i+0]&mask; if (i == 1 && (4 * i + 1) == wrqu->encoding.length) mask = 0x00; if (i == 3 && (4 * i + 1) == wrqu->encoding.length) mask = 0x00; hwkey[i] |= (key[4 * i + 1] & mask) << 8; hwkey[i] |= (key[4 * i + 2] & mask) << 16; hwkey[i] |= (key[4 * i + 3] & mask) << 24; } #define CONF_WEP40 0x4 #define CONF_WEP104 0x14 switch (wrqu->encoding.flags & IW_ENCODE_INDEX) { case 0: key_idx = ieee->crypt_info.tx_keyidx; break; case 1: key_idx = 0; break; case 2: key_idx = 1; break; case 3: key_idx = 2; break; case 4: key_idx = 3; break; default: break; } if (wrqu->encoding.length == 0x5) { ieee->pairwise_key_type = KEY_TYPE_WEP40; EnableHWSecurityConfig8192(dev); } else if (wrqu->encoding.length == 0xd) { ieee->pairwise_key_type = KEY_TYPE_WEP104; EnableHWSecurityConfig8192(dev); setKey(dev, key_idx, key_idx, KEY_TYPE_WEP104, zero_addr[key_idx], 0, hwkey); set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104, zero_addr[key_idx], 0, hwkey, 0); } else { printk(KERN_INFO "wrong type in WEP, not WEP40 and WEP104\n"); } } end_hw_sec: priv->rtllib->wx_set_enc = 0; return ret; } static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *p) { struct r8192_priv *priv = rtllib_priv(dev); int *parms = (int *)p; int mode = parms[0]; if (priv->bHwRadioOff == true) return 0; priv->rtllib->active_scan = mode; return 1; } #define R8192_MAX_RETRY 255 static int r8192_wx_set_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { 
struct r8192_priv *priv = rtllib_priv(dev);
	int err = 0;

	/* (continuation of r8192_wx_set_retry — SIOCSIWRETRY)
	 * Only IW_RETRY_LIMIT requests up to R8192_MAX_RETRY are
	 * accepted; lifetime-based retry and "disabled" are rejected.
	 */
	if (priv->bHwRadioOff == true)
		return 0;

	down(&priv->wx_sem);

	if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
	    wrqu->retry.disabled) {
		err = -EINVAL;
		goto exit;
	}
	if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
		err = -EINVAL;
		goto exit;
	}

	if (wrqu->retry.value > R8192_MAX_RETRY) {
		err = -EINVAL;
		goto exit;
	}
	/* IW_RETRY_MAX selects the RTS/CTS limit, otherwise the plain
	 * data limit is updated. */
	if (wrqu->retry.flags & IW_RETRY_MAX) {
		priv->retry_rts = wrqu->retry.value;
		DMESG("Setting retry for RTS/CTS data to %d",
		      wrqu->retry.value);
	} else {
		priv->retry_data = wrqu->retry.value;
		DMESG("Setting retry for non RTS/CTS data to %d",
		      wrqu->retry.value);
	}

	/* NOTE(review): presumably re-initialises the device so the new
	 * limits take effect — confirm against rtl8192_commit(). */
	rtl8192_commit(dev);
exit:
	up(&priv->wx_sem);

	return err;
}

/* SIOCGIWRETRY handler: report the currently configured retry limits. */
static int r8192_wx_get_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	wrqu->retry.disabled = 0; /* can't be disabled */

	if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
	    IW_RETRY_LIFETIME)
		return -EINVAL;

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrqu->retry.value = priv->retry_rts;
	} else {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
		wrqu->retry.value = priv->retry_data;
	}

	return 0;
}

/*
 * SIOCGIWSENS handler: return the cached sensitivity threshold.
 * Fails with -1 when the radio provides no rf_set_sens callback.
 */
static int r8192_wx_get_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (priv->rf_set_sens == NULL)
		return -1; /* we have not this support for this radio */
	wrqu->sens.value = priv->sens;
	return 0;
}

/*
 * SIOCSIWSENS handler: program a new sensitivity threshold through the
 * radio-specific rf_set_sens() callback and cache it in priv->sens on
 * success. No-op (returns 0) while the HW RF kill switch is active.
 */
static int r8192_wx_set_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	short err = 0;

	if (priv->bHwRadioOff == true)
		return 0;

	down(&priv->wx_sem);
	if (priv->rf_set_sens == NULL) {
		err = -1; /* we have not this support for this radio */
		goto exit;
	}
	if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
		priv->sens = wrqu->sens.value;
	else
		err = -EINVAL;

exit:
	up(&priv->wx_sem);

	return err;
}
static int r8192_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); priv->rtllib->wx_set_enc = 1; down(&priv->rtllib->ips_sem); IPSLeave(dev); up(&priv->rtllib->ips_sem); ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra); { u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 zero[6] = {0}; u32 key[4] = {0}; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct iw_point *encoding = &wrqu->encoding; u8 idx = 0, alg = 0, group = 0; if ((encoding->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE) { ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA; CamResetAllEntry(dev); memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32); goto end_hw_sec; } alg = (ext->alg == IW_ENCODE_ALG_CCMP) ? KEY_TYPE_CCMP : ext->alg; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) idx--; group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY; if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) || (alg == KEY_TYPE_WEP40)) { if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40)) alg = KEY_TYPE_WEP104; ieee->pairwise_key_type = alg; EnableHWSecurityConfig8192(dev); } memcpy((u8 *)key, ext->key, 16); if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) { if (ext->key_len == 13) ieee->pairwise_key_type = alg = KEY_TYPE_WEP104; setKey(dev, idx, idx, alg, zero, 0, key); set_swcam(dev, idx, idx, alg, zero, 0, key, 0); } else if (group) { ieee->group_key_type = alg; setKey(dev, idx, idx, alg, broadcast_addr, 0, key); set_swcam(dev, idx, idx, alg, broadcast_addr, 0, key, 0); } else { if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) && ieee->pHTInfo->bCurrentHTSupport) write_nic_byte(dev, 0x173, 1); setKey(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, 0, key); set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, 0, key, 0); } } 
end_hw_sec: priv->rtllib->wx_set_enc = 0; up(&priv->wx_sem); return ret; } static int r8192_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret = 0; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_set_auth(priv->rtllib, info, &(data->param), extra); up(&priv->wx_sem); return ret; } static int r8192_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_set_mlme(priv->rtllib, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_set_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret = 0; struct r8192_priv *priv = rtllib_priv(dev); if (priv->bHwRadioOff == true) return 0; down(&priv->wx_sem); ret = rtllib_wx_set_gen_ie(priv->rtllib, extra, data->data.length); up(&priv->wx_sem); return ret; } static int r8192_wx_get_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret = 0; struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { data->data.length = 0; return 0; } if (data->data.length < ieee->wpa_ie_len) return -E2BIG; data->data.length = ieee->wpa_ie_len; memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); return ret; } #define OID_RT_INTEL_PROMISCUOUS_MODE 0xFF0101F6 static int r8192_wx_set_PromiscuousMode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; u32 *info_buf = (u32 *)(wrqu->data.pointer); u32 oid = info_buf[0]; u32 bPromiscuousOn = info_buf[1]; u32 bFilterSourceStationFrame = info_buf[2]; if 
(OID_RT_INTEL_PROMISCUOUS_MODE == oid) { ieee->IntelPromiscuousModeInfo.bPromiscuousOn = (bPromiscuousOn) ? (true) : (false); ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame = (bFilterSourceStationFrame) ? (true) : (false); (bPromiscuousOn) ? (rtllib_EnableIntelPromiscuousMode(dev, false)) : (rtllib_DisableIntelPromiscuousMode(dev, false)); printk(KERN_INFO "=======>%s(), on = %d, filter src sta = %d\n", __func__, bPromiscuousOn, bFilterSourceStationFrame); } else { return -1; } return 0; } static int r8192_wx_get_PromiscuousMode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; down(&priv->wx_sem); snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d", ieee->IntelPromiscuousModeInfo.bPromiscuousOn, ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame); wrqu->data.length = strlen(extra) + 1; up(&priv->wx_sem); return 0; } #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] static iw_handler r8192_wx_handlers[] = { IW_IOCTL(SIOCGIWNAME) = r8192_wx_get_name, IW_IOCTL(SIOCSIWFREQ) = r8192_wx_set_freq, IW_IOCTL(SIOCGIWFREQ) = r8192_wx_get_freq, IW_IOCTL(SIOCSIWMODE) = r8192_wx_set_mode, IW_IOCTL(SIOCGIWMODE) = r8192_wx_get_mode, IW_IOCTL(SIOCSIWSENS) = r8192_wx_set_sens, IW_IOCTL(SIOCGIWSENS) = r8192_wx_get_sens, IW_IOCTL(SIOCGIWRANGE) = rtl8192_wx_get_range, IW_IOCTL(SIOCSIWAP) = r8192_wx_set_wap, IW_IOCTL(SIOCGIWAP) = r8192_wx_get_wap, IW_IOCTL(SIOCSIWSCAN) = r8192_wx_set_scan, IW_IOCTL(SIOCGIWSCAN) = r8192_wx_get_scan, IW_IOCTL(SIOCSIWESSID) = r8192_wx_set_essid, IW_IOCTL(SIOCGIWESSID) = r8192_wx_get_essid, IW_IOCTL(SIOCSIWNICKN) = r8192_wx_set_nick, IW_IOCTL(SIOCGIWNICKN) = r8192_wx_get_nick, IW_IOCTL(SIOCSIWRATE) = r8192_wx_set_rate, IW_IOCTL(SIOCGIWRATE) = r8192_wx_get_rate, IW_IOCTL(SIOCSIWRTS) = r8192_wx_set_rts, IW_IOCTL(SIOCGIWRTS) = r8192_wx_get_rts, IW_IOCTL(SIOCSIWFRAG) = r8192_wx_set_frag, IW_IOCTL(SIOCGIWFRAG) = 
r8192_wx_get_frag, IW_IOCTL(SIOCSIWRETRY) = r8192_wx_set_retry, IW_IOCTL(SIOCGIWRETRY) = r8192_wx_get_retry, IW_IOCTL(SIOCSIWENCODE) = r8192_wx_set_enc, IW_IOCTL(SIOCGIWENCODE) = r8192_wx_get_enc, IW_IOCTL(SIOCSIWPOWER) = r8192_wx_set_power, IW_IOCTL(SIOCGIWPOWER) = r8192_wx_get_power, IW_IOCTL(SIOCSIWGENIE) = r8192_wx_set_gen_ie, IW_IOCTL(SIOCGIWGENIE) = r8192_wx_get_gen_ie, IW_IOCTL(SIOCSIWMLME) = r8192_wx_set_mlme, IW_IOCTL(SIOCSIWAUTH) = r8192_wx_set_auth, IW_IOCTL(SIOCSIWENCODEEXT) = r8192_wx_set_enc_ext, }; /* * the following rule need to be follwing, * Odd : get (world access), * even : set (root access) * */ static const struct iw_priv_args r8192_private_args[] = { { SIOCIWFIRSTPRIV + 0x0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_debugflag" }, { SIOCIWFIRSTPRIV + 0x1, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx" }, { SIOCIWFIRSTPRIV + 0x3, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset" }, { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "force_mic_error" }, { SIOCIWFIRSTPRIV + 0x5, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT|IW_PRIV_SIZE_FIXED|1, "firm_ver" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE, "set_power" }, { SIOCIWFIRSTPRIV + 0x9, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE, "radio" }, { SIOCIWFIRSTPRIV + 0xa, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE, "lps_interv" }, { SIOCIWFIRSTPRIV + 0xb, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE, "lps_force" }, { SIOCIWFIRSTPRIV + 0xc, 0, IW_PRIV_TYPE_CHAR|2047, "adhoc_peer_list" }, { SIOCIWFIRSTPRIV + 0x16, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "setpromisc" }, { SIOCIWFIRSTPRIV + 0x17, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 45, "getpromisc" } }; static iw_handler r8192_private_handler[] = { (iw_handler)r8192_wx_set_debugflag, /*SIOCIWSECONDPRIV*/ 
(iw_handler)r8192_wx_set_scan_type, (iw_handler)r8192_wx_set_rawtx, (iw_handler)r8192_wx_force_reset, (iw_handler)r8192_wx_force_mic_error, (iw_handler)r8191se_wx_get_firm_version, (iw_handler)r8192_wx_adapter_power_status, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)r8192se_wx_set_radio, (iw_handler)r8192se_wx_set_lps_awake_interval, (iw_handler)r8192se_wx_set_force_lps, (iw_handler)r8192_wx_get_adhoc_peers, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)NULL, (iw_handler)r8192_wx_set_PromiscuousMode, (iw_handler)r8192_wx_get_PromiscuousMode, }; static struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev) { struct r8192_priv *priv = rtllib_priv(dev); struct rtllib_device *ieee = priv->rtllib; struct iw_statistics *wstats = &priv->wstats; int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; if (ieee->state < RTLLIB_LINKED) { wstats->qual.qual = 10; wstats->qual.level = 0; wstats->qual.noise = -100; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } tmp_level = (&ieee->current_network)->stats.rssi; tmp_qual = (&ieee->current_network)->stats.signal; tmp_noise = (&ieee->current_network)->stats.noise; wstats->qual.level = tmp_level; wstats->qual.qual = tmp_qual; wstats->qual.noise = tmp_noise; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } struct iw_handler_def r8192_wx_handlers_def = { .standard = r8192_wx_handlers, .num_standard = ARRAY_SIZE(r8192_wx_handlers), .private = r8192_private_handler, .num_private = ARRAY_SIZE(r8192_private_handler), .num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args), .get_wireless_stats = r8192_get_wireless_stats, .private_args = (struct iw_priv_args *)r8192_private_args, };
gpl-2.0
NeverLEX/linux
drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
4391
10526
/* * OMAP4 thermal driver. * * Copyright (C) 2011-2012 Texas Instruments Inc. * Contact: * Eduardo Valentin <eduardo.valentin@ti.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "ti-thermal.h" #include "ti-bandgap.h" #include "omap4xxx-bandgap.h" /* * OMAP4430 has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap4430_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET, .bgap_tempsoff_mask = OMAP4430_BGAP_TEMPSOFF_MASK, .bgap_soc_mask = OMAP4430_BGAP_TEMP_SENSOR_SOC_MASK, .bgap_eocz_mask = OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mode_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET, .mode_ctrl_mask = OMAP4430_SINGLE_MODE_MASK, .bgap_efuse = OMAP4430_FUSE_OPP_BGAP, }; /* Thresholds and limits for OMAP4430 MPU temperature sensor */ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { .min_freq = OMAP4430_MIN_FREQ, .max_freq = OMAP4430_MAX_FREQ, .max_temp = OMAP4430_MAX_TEMP, .min_temp = OMAP4430_MIN_TEMP, .hyst_val = OMAP4430_HYST_VAL, }; /* * Temperature values in milli degree celsius * ADC code values from 530 to 923 */ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 
30000, 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, 117000, 118000, 120000, 122000, 123000, }; /* OMAP4430 data */ const struct ti_bandgap_data omap4430_data = { .features = TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_POWER_SWITCH, .fclock_name = "bandgap_fclk", .div_ck_name = "bandgap_fclk", .conv_table = omap4430_adc_to_temp, .adc_start_val = OMAP4430_ADC_START_VALUE, .adc_end_val = OMAP4430_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .sensors = { { .registers = &omap4430_mpu_temp_sensor_registers, .ts_data = &omap4430_mpu_temp_sensor_data, .domain = "cpu", .slope = OMAP_GRADIENT_SLOPE_4430, .constant = OMAP_GRADIENT_CONST_4430, .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4430, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4430, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, }; /* * OMAP4460 has one instance of thermal sensor for MPU * need to describe the individual bit fields */ static struct temp_sensor_registers omap4460_mpu_temp_sensor_registers = { .temp_sensor_ctrl = OMAP4460_TEMP_SENSOR_CTRL_OFFSET, .bgap_tempsoff_mask = OMAP4460_BGAP_TEMPSOFF_MASK, .bgap_soc_mask = OMAP4460_BGAP_TEMP_SENSOR_SOC_MASK, .bgap_eocz_mask = OMAP4460_BGAP_TEMP_SENSOR_EOCZ_MASK, .bgap_dtemp_mask = OMAP4460_BGAP_TEMP_SENSOR_DTEMP_MASK, .bgap_mask_ctrl = OMAP4460_BGAP_CTRL_OFFSET, .mask_hot_mask = OMAP4460_MASK_HOT_MASK, .mask_cold_mask = OMAP4460_MASK_COLD_MASK, .bgap_mode_ctrl = OMAP4460_BGAP_CTRL_OFFSET, .mode_ctrl_mask = OMAP4460_SINGLE_MODE_MASK, .bgap_counter = OMAP4460_BGAP_COUNTER_OFFSET, 
.counter_mask = OMAP4460_COUNTER_MASK, .bgap_threshold = OMAP4460_BGAP_THRESHOLD_OFFSET, .threshold_thot_mask = OMAP4460_T_HOT_MASK, .threshold_tcold_mask = OMAP4460_T_COLD_MASK, .tshut_threshold = OMAP4460_BGAP_TSHUT_OFFSET, .tshut_hot_mask = OMAP4460_TSHUT_HOT_MASK, .tshut_cold_mask = OMAP4460_TSHUT_COLD_MASK, .bgap_status = OMAP4460_BGAP_STATUS_OFFSET, .status_clean_stop_mask = OMAP4460_CLEAN_STOP_MASK, .status_bgap_alert_mask = OMAP4460_BGAP_ALERT_MASK, .status_hot_mask = OMAP4460_HOT_FLAG_MASK, .status_cold_mask = OMAP4460_COLD_FLAG_MASK, .bgap_efuse = OMAP4460_FUSE_OPP_BGAP, }; /* Thresholds and limits for OMAP4460 MPU temperature sensor */ static struct temp_sensor_data omap4460_mpu_temp_sensor_data = { .tshut_hot = OMAP4460_TSHUT_HOT, .tshut_cold = OMAP4460_TSHUT_COLD, .t_hot = OMAP4460_T_HOT, .t_cold = OMAP4460_T_COLD, .min_freq = OMAP4460_MIN_FREQ, .max_freq = OMAP4460_MAX_FREQ, .max_temp = OMAP4460_MAX_TEMP, .min_temp = OMAP4460_MIN_TEMP, .hyst_val = OMAP4460_HYST_VAL, .update_int1 = 1000, .update_int2 = 2000, }; /* * Temperature values in milli degree celsius * ADC code values from 530 to 923 */ static const int omap4460_adc_to_temp[OMAP4460_ADC_END_VALUE - OMAP4460_ADC_START_VALUE + 1] = { -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200, -37800, -37300, -36800, -36400, -36000, -35600, -35200, -34800, -34300, -33800, -33400, -33000, -32600, -32200, -31800, -31300, -30800, -30400, -30000, -29600, -29200, -28700, -28200, -27800, -27400, -27000, -26600, -26200, -25700, -25200, -24800, -24400, -24000, -23600, -23200, -22700, -22200, -21800, -21400, -21000, -20600, -20200, -19700, -19200, -18800, -18400, -18000, -17600, -17200, -16700, -16200, -15800, -15400, -15000, -14600, -14200, -13700, -13200, -12800, -12400, -12000, -11600, -11200, -10700, -10200, -9800, -9400, -9000, -8600, -8200, -7700, -7200, -6800, -6400, -6000, -5600, -5200, -4800, -4300, -3800, -3400, -3000, -2600, -2200, -1800, -1300, -800, -400, 0, 400, 800, 1200, 1600, 
2100, 2600, 3000, 3400, 3800, 4200, 4600, 5100, 5600, 6000, 6400, 6800, 7200, 7600, 8000, 8500, 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11900, 12400, 12800, 13200, 13600, 14000, 14400, 14800, 15300, 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18700, 19200, 19600, 20000, 20400, 20800, 21200, 21600, 22100, 22600, 23000, 23400, 23800, 24200, 24600, 25000, 25400, 25900, 26400, 26800, 27200, 27600, 28000, 28400, 28800, 29300, 29800, 30200, 30600, 31000, 31400, 31800, 32200, 32600, 33100, 33600, 34000, 34400, 34800, 35200, 35600, 36000, 36400, 36800, 37300, 37800, 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41100, 41600, 42000, 42400, 42800, 43200, 43600, 44000, 44400, 44800, 45300, 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, 49000, 49500, 50000, 50400, 50800, 51200, 51600, 52000, 52400, 52800, 53200, 53700, 54200, 54600, 55000, 55400, 55800, 56200, 56600, 57000, 57400, 57800, 58200, 58700, 59200, 59600, 60000, 60400, 60800, 61200, 61600, 62000, 62400, 62800, 63300, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600, 67000, 67400, 67800, 68200, 68700, 69200, 69600, 70000, 70400, 70800, 71200, 71600, 72000, 72400, 72800, 73200, 73600, 74100, 74600, 75000, 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000, 79400, 79800, 80300, 80800, 81200, 81600, 82000, 82400, 82800, 83200, 83600, 84000, 84400, 84800, 85200, 85600, 86000, 86400, 86800, 87300, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000, 95500, 96000, 96400, 96800, 97200, 97600, 98000, 98400, 98800, 99200, 99600, 100000, 100400, 100800, 101200, 101600, 102000, 102400, 102800, 103200, 103600, 104000, 104400, 104800, 105200, 105600, 106100, 106600, 107000, 107400, 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000, 111400, 111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600, 115000, 115400, 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 
119000, 119400, 119800, 120200, 120600, 121000, 121400, 121800, 122200, 122600, 123000, 123400, 123800, 124200, 124600, 124900, 125000, 125000, 125000, 125000 }; /* OMAP4460 data */ const struct ti_bandgap_data omap4460_data = { .features = TI_BANDGAP_FEATURE_TSHUT | TI_BANDGAP_FEATURE_TSHUT_CONFIG | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_POWER_SWITCH | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_COUNTER, .fclock_name = "bandgap_ts_fclk", .div_ck_name = "div_ts_ck", .conv_table = omap4460_adc_to_temp, .adc_start_val = OMAP4460_ADC_START_VALUE, .adc_end_val = OMAP4460_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .report_temperature = ti_thermal_report_sensor_temperature, .sensors = { { .registers = &omap4460_mpu_temp_sensor_registers, .ts_data = &omap4460_mpu_temp_sensor_data, .domain = "cpu", .slope = OMAP_GRADIENT_SLOPE_4460, .constant = OMAP_GRADIENT_CONST_4460, .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4460, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4460, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, }; /* OMAP4470 data */ const struct ti_bandgap_data omap4470_data = { .features = TI_BANDGAP_FEATURE_TSHUT | TI_BANDGAP_FEATURE_TSHUT_CONFIG | TI_BANDGAP_FEATURE_TALERT | TI_BANDGAP_FEATURE_MODE_CONFIG | TI_BANDGAP_FEATURE_POWER_SWITCH | TI_BANDGAP_FEATURE_CLK_CTRL | TI_BANDGAP_FEATURE_COUNTER, .fclock_name = "bandgap_ts_fclk", .div_ck_name = "div_ts_ck", .conv_table = omap4460_adc_to_temp, .adc_start_val = OMAP4460_ADC_START_VALUE, .adc_end_val = OMAP4460_ADC_END_VALUE, .expose_sensor = ti_thermal_expose_sensor, .remove_sensor = ti_thermal_remove_sensor, .report_temperature = ti_thermal_report_sensor_temperature, .sensors = { { .registers = &omap4460_mpu_temp_sensor_registers, .ts_data = &omap4460_mpu_temp_sensor_data, .domain = "cpu", .slope = OMAP_GRADIENT_SLOPE_4470, .constant 
= OMAP_GRADIENT_CONST_4470, .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4470, .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4470, .register_cooling = ti_thermal_register_cpu_cooling, .unregister_cooling = ti_thermal_unregister_cpu_cooling, }, }, .sensor_count = 1, };
gpl-2.0
davidmueller13/Gear_S_Kernel
arch/blackfin/mach-bf538/boards/ezkit.c
4391
23431
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/bfin5xx_spi.h> #include <asm/dma.h> #include <asm/gpio.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/input.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF538-EZKIT"; /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif /* CONFIG_RTC_DRV_BFIN */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART0_CTSRTS { /* CTS pin */ .start = GPIO_PG7, .end = GPIO_PG7, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PG6, .end = GPIO_PG6, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = 
&bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_UART0 */ #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_UART1 */ #ifdef CONFIG_SERIAL_BFIN_UART2 static struct resource bfin_uart2_resources[] = { { .start = UART2_THR, .end = UART2_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART2_TX, .end = IRQ_UART2_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART2_RX, .end = IRQ_UART2_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART2_ERROR, .end = IRQ_UART2_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART2_TX, .end = CH_UART2_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART2_RX, .end = CH_UART2_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart2_peripherals[] = { P_UART2_TX, P_UART2_RX, 0 }; static struct platform_device bfin_uart2_device = { .name = "bfin-uart", .id = 2, .num_resources = ARRAY_SIZE(bfin_uart2_resources), .resource = bfin_uart2_resources, .dev = { .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_UART2 */ #endif /* CONFIG_SERIAL_BFIN */ #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 
static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif /* CONFIG_BFIN_SIR0 */ #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif /* CONFIG_BFIN_SIR1 */ #ifdef CONFIG_BFIN_SIR2 static struct resource bfin_sir2_resources[] = { { .start = 0xFFC02100, .end = 0xFFC021FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART2_RX, .end = IRQ_UART2_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART2_RX, .end = CH_UART2_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir2_device = { .name = "bfin_sir", .id = 2, .num_resources = ARRAY_SIZE(bfin_sir2_resources), .resource = bfin_sir2_resources, }; #endif /* CONFIG_BFIN_SIR2 */ #endif /* CONFIG_BFIN_SIR */ #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, 
P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART static struct resource bfin_sport2_uart_resources[] = { { .start = SPORT2_TCR1, .end = SPORT2_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT2_RX, .end = IRQ_SPORT2_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT2_ERROR, .end = IRQ_SPORT2_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport2_peripherals[] = { P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 }; static struct platform_device bfin_sport2_uart_device = { .name = "bfin-sport-uart", .id = 2, .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources), .resource = bfin_sport2_uart_resources, .dev = { .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */ #ifdef 
CONFIG_SERIAL_BFIN_SPORT3_UART static struct resource bfin_sport3_uart_resources[] = { { .start = SPORT3_TCR1, .end = SPORT3_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT3_RX, .end = IRQ_SPORT3_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT3_ERROR, .end = IRQ_SPORT3_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport3_peripherals[] = { P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 }; static struct platform_device bfin_sport3_uart_device = { .name = "bfin-sport-uart", .id = 3, .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources), .resource = bfin_sport3_uart_resources, .dev = { .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */ #endif /* CONFIG_SERIAL_BFIN_SPORT */ #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) static unsigned short bfin_can_peripherals[] = { P_CAN0_RX, P_CAN0_TX, 0 }; static struct resource bfin_can_resources[] = { { .start = 0xFFC02A00, .end = 0xFFC02FFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_CAN_RX, .end = IRQ_CAN_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_CAN_TX, .end = IRQ_CAN_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_CAN_ERROR, .end = IRQ_CAN_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_can_device = { .name = "bfin_can", .num_resources = ARRAY_SIZE(bfin_can_resources), .resource = bfin_can_resources, .dev = { .platform_data = &bfin_can_peripherals, /* Passed to driver */ }, }; #endif /* CONFIG_CAN_BFIN */ /* * USB-LAN EzExtender board * Driver needs to know address, irq and flag pin. 
*/ #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20310300, .end = 0x20310300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF0, .end = IRQ_PF0, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif /* CONFIG_SMC91X */ #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* all SPI peripherals info goes here */ #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) /* SPI flash chip (m25p16) */ static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = 0x1c0000, .offset = 0x40000 } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif /* CONFIG_MTD_M25P80 */ #endif /* CONFIG_SPI_BFIN5XX */ #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> static const struct ad7879_platform_data bfin_ad7879_ts_info = { .model = 7879, /* Model = AD7879 */ .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, .first_conversion_delay = 3, /* wait 512us before do a first conversion */ .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ .averaging = 1, /* take 
the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; #endif /* CONFIG_TOUCHSCREEN_AD7879 */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_16_BIT_PPI, .use_bl = 0, /* let something else control the LCD Blacklight */ .gpio_bl = GPIO_PF7, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif /* CONFIG_FB_BFIN_LQ035Q1 */ static struct spi_board_info bf538_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* SPI_SSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif /* CONFIG_MTD_M25P80 */ #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", .platform_data = &bfin_ad7879_ts_info, .irq = IRQ_PF3, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, #endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in 
HZ */ .bus_num = 0, .chip_select = 2, .mode = SPI_CPHA | SPI_CPOL, }, #endif /* CONFIG_FB_BFIN_LQ035Q1 */ #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif /* CONFIG_SPI_SPIDEV */ }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI0, .end = CH_SPI0, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI0, .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, } }; /* SPI (1) */ static struct resource bfin_spi1_resource[] = { [0] = { .start = SPI1_REGBASE, .end = SPI1_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI1, .end = CH_SPI1, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI1, .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, } }; /* SPI (2) */ static struct resource bfin_spi2_resource[] = { [0] = { .start = SPI2_REGBASE, .end = SPI2_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI2, .end = CH_SPI2, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI2, .end = IRQ_SPI2, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bf538_spi_master_info0 = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bf538_spi_master0 = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bf538_spi_master_info0, /* Passed to driver */ }, }; static struct bfin5xx_spi_master bf538_spi_master_info1 = { .num_chipselect = 2, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, }; static struct platform_device bf538_spi_master1 = { .name = "bfin-spi", .id = 1, /* Bus number */ 
.num_resources = ARRAY_SIZE(bfin_spi1_resource), .resource = bfin_spi1_resource, .dev = { .platform_data = &bf538_spi_master_info1, /* Passed to driver */ }, }; static struct bfin5xx_spi_master bf538_spi_master_info2 = { .num_chipselect = 2, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI2_SCK, P_SPI2_MISO, P_SPI2_MOSI, 0}, }; static struct platform_device bf538_spi_master2 = { .name = "bfin-spi", .id = 2, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi2_resource), .resource = bfin_spi2_resource, .dev = { .platform_data = &bf538_spi_master_info2, /* Passed to driver */ }, }; #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI0, .end = IRQ_TWI0, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi0_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */ static struct resource bfin_twi1_resource[] = { [0] = { .start = TWI1_REGBASE, .end = TWI1_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI1, .end = IRQ_TWI1, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi1_device = { .name = "i2c-bfin-twi", .id = 1, .num_resources = ARRAY_SIZE(bfin_twi1_resource), .resource = bfin_twi1_resource, }; #endif /* CONFIG_BF542 */ #endif /* CONFIG_I2C_BLACKFIN_TWI */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PC7, 1, "gpio-keys: BTN0"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = 
"gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { /* * Internal VLEV BF538SBBC1533 ****temporarily using these values until data sheet is updated */ VRPAIR(VLEV_100, 150000000), VRPAIR(VLEV_100, 250000000), VRPAIR(VLEV_110, 276000000), VRPAIR(VLEV_115, 301000000), VRPAIR(VLEV_120, 525000000), VRPAIR(VLEV_125, 550000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezkit_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x180000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ezkit_flash_data = { .width = 2, .parts = ezkit_partitions, .nr_parts = ARRAY_SIZE(ezkit_partitions), }; static struct resource ezkit_flash_resource = { .start = 0x20000000, #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) .end = 0x202fffff, #else .end = 0x203fffff, #endif .flags = IORESOURCE_MEM, }; static struct platform_device ezkit_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ezkit_flash_data, }, .num_resources = 1, .resource = &ezkit_flash_resource, }; #endif static struct platform_device *cm_bf538_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #ifdef 
CONFIG_SERIAL_BFIN_UART2 &bfin_uart2_device, #endif #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bf538_spi_master0, &bf538_spi_master1, &bf538_spi_master2, #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi0_device, &i2c_bfin_twi1_device, #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #ifdef CONFIG_BFIN_SIR2 &bfin_sir2_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART &bfin_sport2_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT3_UART &bfin_sport3_uart_device, #endif #endif #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) &bfin_can_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezkit_flash_device, #endif }; static int __init ezkit_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); platform_add_devices(cm_bf538_devices, ARRAY_SIZE(cm_bf538_devices)); #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) spi_register_board_info(bf538_spi_board_info, ARRAY_SIZE(bf538_spi_board_info)); #endif return 0; } arch_initcall(ezkit_init); static struct platform_device *ezkit_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef 
CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART2 &bfin_uart2_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART &bfin_sport2_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT3_UART &bfin_sport3_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ezkit_early_devices, ARRAY_SIZE(ezkit_early_devices)); }
gpl-2.0
Sparhawk76/android_kernel_samsung_msm8226
arch/sparc/kernel/sys_sparc32.c
4647
16088
/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/dnotify.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/slab.h>

#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/mmu_context.h>
#include <asm/compat_signal.h>

#ifdef CONFIG_SYSVIPC
/*
 * 32-bit entry point for the multiplexed sys_ipc(2) call: dispatch on the
 * low 16 bits of @call to the individual System V IPC syscalls.  Arguments
 * arrive as u32 register values; the ones that are really signed in the
 * native ABI (semid, msqid, key, nsems, msgtyp) are explicitly cast to
 * (int) so they are sign-extended before reaching the 64-bit handlers.
 * @ptr and @fifth are 32-bit user pointers and go through compat_ptr().
 */
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
	int version;

	/* hack for backward compatibility: version lives in the top half */
	version = call >> 16;
	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		if (fifth)
			/* sign extend semid; fifth is the timeout pointer */
			return compat_sys_semtimedop((int)first,
				compat_ptr(ptr), second, compat_ptr(fifth));
		/* else fall through for normal semop() (NULL timeout) */
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		/* sign extend semid */
		return sys_semtimedop((int)first, compat_ptr(ptr), second,
				      NULL);
	case SEMGET:
		/* sign extend key, nsems */
		return sys_semget((int)first, (int)second, third);
	case SEMCTL:
		/* sign
extend semid, semnum */ return compat_sys_semctl((int)first, (int)second, third, compat_ptr(ptr)); case MSGSND: /* sign extend msqid */ return compat_sys_msgsnd((int)first, (int)second, third, compat_ptr(ptr)); case MSGRCV: /* sign extend msqid, msgtyp */ return compat_sys_msgrcv((int)first, second, (int)fifth, third, version, compat_ptr(ptr)); case MSGGET: /* sign extend key */ return sys_msgget((int)first, second); case MSGCTL: /* sign extend msqid */ return compat_sys_msgctl((int)first, second, compat_ptr(ptr)); case SHMAT: /* sign extend shmid */ return compat_sys_shmat((int)first, second, third, version, compat_ptr(ptr)); case SHMDT: return sys_shmdt(compat_ptr(ptr)); case SHMGET: /* sign extend key_t */ return sys_shmget((int)first, second, third); case SHMCTL: /* sign extend shmid */ return compat_sys_shmctl((int)first, second, compat_ptr(ptr)); default: return -ENOSYS; } return -ENOSYS; } #endif asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) { if ((int)high < 0) return -EINVAL; else return sys_truncate(path, (high << 32) | low); } asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low) { if ((int)high < 0) return -EINVAL; else return sys_ftruncate(fd, (high << 32) | low); } static int cp_compat_stat64(struct kstat *stat, struct compat_stat64 __user *statbuf) { int err; err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev); err |= put_user(stat->ino, &statbuf->st_ino); err |= put_user(stat->mode, &statbuf->st_mode); err |= put_user(stat->nlink, &statbuf->st_nlink); err |= put_user(stat->uid, &statbuf->st_uid); err |= put_user(stat->gid, &statbuf->st_gid); err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev); err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]); err |= put_user(stat->size, &statbuf->st_size); err |= put_user(stat->blksize, &statbuf->st_blksize); err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]); err |= put_user(0, 
(unsigned int __user *) &statbuf->__pad4[4]); err |= put_user(stat->blocks, &statbuf->st_blocks); err |= put_user(stat->atime.tv_sec, &statbuf->st_atime); err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec); err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime); err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec); err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime); err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec); err |= put_user(0, &statbuf->__unused4); err |= put_user(0, &statbuf->__unused5); return err; } asmlinkage long compat_sys_stat64(const char __user * filename, struct compat_stat64 __user *statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_compat_stat64(&stat, statbuf); return error; } asmlinkage long compat_sys_lstat64(const char __user * filename, struct compat_stat64 __user *statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_compat_stat64(&stat, statbuf); return error; } asmlinkage long compat_sys_fstat64(unsigned int fd, struct compat_stat64 __user * statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_compat_stat64(&stat, statbuf); return error; } asmlinkage long compat_sys_fstatat64(unsigned int dfd, const char __user *filename, struct compat_stat64 __user * statbuf, int flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_compat_stat64(&stat, statbuf); } asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2) { return sys_sysfs(option, arg1, arg2); } asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval) { struct timespec t; int ret; mm_segment_t old_fs = get_fs (); set_fs (KERNEL_DS); ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); set_fs (old_fs); if (put_compat_timespec(&t, interval)) return -EFAULT; return ret; } asmlinkage long 
compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset, compat_size_t sigsetsize) { sigset_t s; compat_sigset_t s32; int ret; mm_segment_t old_fs = get_fs(); if (set) { if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) return -EFAULT; switch (_NSIG_WORDS) { case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); } } set_fs (KERNEL_DS); ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *) &s : NULL, oset ? (sigset_t __user *) &s : NULL, sigsetsize); set_fs (old_fs); if (ret) return ret; if (oset) { switch (_NSIG_WORDS) { case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; } if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) return -EFAULT; } return 0; } asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) { sigset_t s; compat_sigset_t s32; int ret; mm_segment_t old_fs = get_fs(); set_fs (KERNEL_DS); ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); set_fs (old_fs); if (!ret) { switch (_NSIG_WORDS) { case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; } if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) return -EFAULT; } return ret; } asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig, struct compat_siginfo __user *uinfo) { siginfo_t info; int ret; mm_segment_t old_fs = get_fs(); if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; set_fs (KERNEL_DS); ret = sys_rt_sigqueueinfo(pid, 
sig, (siginfo_t __user *) &info); set_fs (old_fs); return ret; } asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact) { struct k_sigaction new_ka, old_ka; int ret; WARN_ON_ONCE(sig >= 0); sig = -sig; if (act) { compat_old_sigset_t mask; u32 u_handler, u_restorer; ret = get_user(u_handler, &act->sa_handler); new_ka.sa.sa_handler = compat_ptr(u_handler); ret |= __get_user(u_restorer, &act->sa_restorer); new_ka.sa.sa_restorer = compat_ptr(u_restorer); ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); ret |= __get_user(mask, &act->sa_mask); if (ret) return ret; new_ka.ka_restorer = NULL; siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; } asmlinkage long compat_sys_rt_sigaction(int sig, struct sigaction32 __user *act, struct sigaction32 __user *oact, void __user *restorer, compat_size_t sigsetsize) { struct k_sigaction new_ka, old_ka; int ret; compat_sigset_t set32; /* XXX: Don't preclude handling different sized sigset_t's. 
*/ if (sigsetsize != sizeof(compat_sigset_t)) return -EINVAL; if (act) { u32 u_handler, u_restorer; new_ka.ka_restorer = restorer; ret = get_user(u_handler, &act->sa_handler); new_ka.sa.sa_handler = compat_ptr(u_handler); ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)); switch (_NSIG_WORDS) { case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32); case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32); case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32); case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32); } ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); ret |= __get_user(u_restorer, &act->sa_restorer); new_ka.sa.sa_restorer = compat_ptr(u_restorer); if (ret) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { switch (_NSIG_WORDS) { case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3]; case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2]; case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1]; case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0]; } ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)); ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); if (ret) ret = -EFAULT; } return ret; } /* * sparc32_execve() executes a new program after the asm stub has set * things up for us. This should basically do what I want it to. */ asmlinkage long sparc32_execve(struct pt_regs *regs) { int error, base = 0; char *filename; /* User register window flush is done by entry.S */ /* Check for indirect call. 
*/ if ((u32)regs->u_regs[UREG_G1] == 0) base = 1; filename = getname(compat_ptr(regs->u_regs[base + UREG_I0])); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = compat_do_execve(filename, compat_ptr(regs->u_regs[base + UREG_I1]), compat_ptr(regs->u_regs[base + UREG_I2]), regs); putname(filename); if (!error) { fprs_write(0); current_thread_info()->xfsr[0] = 0; current_thread_info()->fpsaved[0] = 0; regs->tstate &= ~TSTATE_PEF; } out: return error; } #ifdef CONFIG_MODULES asmlinkage long sys32_init_module(void __user *umod, u32 len, const char __user *uargs) { return sys_init_module(umod, len, uargs); } asmlinkage long sys32_delete_module(const char __user *name_user, unsigned int flags) { return sys_delete_module(name_user, flags); } #else /* CONFIG_MODULES */ asmlinkage long sys32_init_module(const char __user *name_user, struct module __user *mod_user) { return -ENOSYS; } asmlinkage long sys32_delete_module(const char __user *name_user) { return -ENOSYS; } #endif /* CONFIG_MODULES */ asmlinkage compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, unsigned long poshi, unsigned long poslo) { return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo); } asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count, unsigned long poshi, unsigned long poslo) { return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo); } asmlinkage long compat_sys_readahead(int fd, unsigned long offhi, unsigned long offlo, compat_size_t count) { return sys_readahead(fd, (offhi << 32) | offlo, count); } long compat_sys_fadvise64(int fd, unsigned long offhi, unsigned long offlo, compat_size_t len, int advice) { return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice); } long compat_sys_fadvise64_64(int fd, unsigned long offhi, unsigned long offlo, unsigned long lenhi, unsigned long lenlo, int advice) { return sys_fadvise64_64(fd, (offhi << 32) | offlo, (lenhi << 32) | lenlo, advice); } 
/*
 * 32-bit sendfile(): userspace hands us a pointer to a 32-bit
 * compat_off_t, but sys_sendfile() expects a (64-bit) off_t.  Copy the
 * value into a kernel-stack off_t, temporarily widen the address limit
 * with set_fs(KERNEL_DS) so the kernel-stack pointer may be passed where
 * a __user pointer is expected, then write the updated offset back.
 * A NULL @offset means "use the file position" and is passed through.
 */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
				    compat_off_t __user *offset,
				    compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __user *) &of : NULL, count);
	set_fs(old_fs);

	/* propagate the (possibly advanced) offset back to userspace */
	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}

/*
 * Same shim as compat_sys_sendfile() but for the 64-bit-offset variant:
 * the user value is a compat_loff_t bounced through a kernel loff_t.
 */
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
				      compat_loff_t __user *offset,
				      compat_size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __user *) &lof : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}

/* This is just a version for 32-bit applications which does
 * not force O_LARGEFILE on.
 */
asmlinkage long sparc32_open(const char __user *filename,
			     int flags, int mode)
{
	return do_sys_open(AT_FDCWD, filename, flags, mode);
}

/*
 * The 64-bit cookie arrives split across two registers; reassemble it.
 * NOTE(review): unsigned long appears to be 64-bit here (sparc64 build),
 * so the << 32 shift is well-defined without a cast — confirm this file
 * is only built for 64-bit kernels.
 */
long sys32_lookup_dcookie(unsigned long cookie_high,
			  unsigned long cookie_low,
			  char __user *buf, size_t len)
{
	return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
				  buf, len);
}

/*
 * sync_file_range(2) shim: rebuild the 64-bit offset and nbytes values
 * from their high/low register halves.
 */
long compat_sync_file_range(int fd,
			    unsigned long off_high, unsigned long off_low,
			    unsigned long nb_high, unsigned long nb_low,
			    int flags)
{
	return sys_sync_file_range(fd,
				   (off_high << 32) | off_low,
				   (nb_high << 32) | nb_low,
				   flags);
}

/*
 * fallocate(2) shim: unlike the shims above, the halves are u32 here,
 * so each high half must be widened to loff_t *before* the 32-bit shift
 * (shifting a u32 by 32 would discard the value / be undefined).
 */
asmlinkage long compat_sys_fallocate(int fd, int mode,
				     u32 offhi, u32 offlo,
				     u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode,
			     ((loff_t)offhi << 32) | offlo,
			     ((loff_t)lenhi << 32) | lenlo);
}
gpl-2.0