| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
ivanich/android_kernel_oneplus_msm8996 | arch/arm/mach-omap2/clkt_iclk.c | 1018 | 1462 | /*
* OMAP2/3 interface clock control
*
* Copyright (C) 2011 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clock.h"
/* Register offsets */
#define CM_AUTOIDLE 0x30
#define CM_ICLKEN 0x10
/* Private functions */
/* omap2_clkt_iclk_allow_idle - enable hardware autoidle for an interface clock */
void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
{
u32 v;
void __iomem *r;
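/*
 * The CM_AUTOIDLE register lives at a fixed offset from the matching
 * CM_ICLKEN register, so XOR-ing the enable register address with
 * (CM_AUTOIDLE ^ CM_ICLKEN) yields the autoidle register for this clock.
 */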
r = (__force void __iomem *)
((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
v = omap2_clk_readl(clk, r);
v |= (1 << clk->enable_bit);
omap2_clk_writel(v, clk, r);
}
/* omap2_clkt_iclk_deny_idle - disable hardware autoidle for an interface clock */
void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
{
u32 v;
void __iomem *r;
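/* same companion-register address trick as in omap2_clkt_iclk_allow_idle() */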
r = (__force void __iomem *)
((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
v = omap2_clk_readl(clk, r);
v &= ~(1 << clk->enable_bit);
omap2_clk_writel(v, clk, r);
}
/* Public data */
const struct clk_hw_omap_ops clkhwops_iclk = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
};
const struct clk_hw_omap_ops clkhwops_iclk_wait = {
.allow_idle = omap2_clkt_iclk_allow_idle,
.deny_idle = omap2_clkt_iclk_deny_idle,
.find_idlest = omap2_clk_dflt_find_idlest,
.find_companion = omap2_clk_dflt_find_companion,
};
| gpl-2.0 |
prarit/staging-next-unisys | drivers/watchdog/lantiq_wdt.c | 1274 | 5878 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
* Based on EP93xx wdt driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <lantiq_soc.h>
/*
* Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
* Essentially, the following two magic passwords need to be written to allow
* IO access to the WDT core
*/
#define LTQ_WDT_PW1 0x00BE0000
#define LTQ_WDT_PW2 0x00DC0000
#define LTQ_WDT_CR 0x0 /* watchdog control register */
#define LTQ_WDT_SR 0x8 /* watchdog status register */
#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */
#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */
#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */
/* divider to 0x40000 */
#define LTQ_WDT_DIVIDER 0x40000
#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */
static bool nowayout = WATCHDOG_NOWAYOUT;
static void __iomem *ltq_wdt_membase;
static unsigned long ltq_io_region_clk_rate;
static unsigned long ltq_wdt_bootstatus;
static unsigned long ltq_wdt_in_use;
static int ltq_wdt_timeout = 30;
static int ltq_wdt_ok_to_close;
static void
ltq_wdt_enable(void)
{
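/*
 * Convert the timeout from seconds into watchdog ticks (I/O region clock
 * divided by the fixed hardware divider); the extra 0x1000 ticks appear
 * to be a small safety margin before the clamp below.
 */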
unsigned long int timeout = ltq_wdt_timeout *
(ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
if (timeout > LTQ_MAX_TIMEOUT)
timeout = LTQ_MAX_TIMEOUT;
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/* write the second magic plus the configuration and new timeout */
ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
LTQ_WDT_PW2 | timeout, ltq_wdt_membase + LTQ_WDT_CR);
}
static void
ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
/*
* write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
}
static ssize_t
ltq_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len) {
if (!nowayout) {
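/*
 * scan the written data for the magic close character 'V';
 * only then may a later release actually stop the watchdog
 */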
size_t i;
ltq_wdt_ok_to_close = 0;
for (i = 0; i != len; i++) {
char c;
if (get_user(c, data + i))
return -EFAULT;
if (c == 'V')
ltq_wdt_ok_to_close = 1;
else
ltq_wdt_ok_to_close = 0;
}
}
ltq_wdt_enable();
}
return len;
}
static struct watchdog_info ident = {
.options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
WDIOF_CARDRESET,
.identity = "ltq_wdt",
};
static long
ltq_wdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int ret = -ENOTTY;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(ltq_wdt_bootstatus, (int __user *)arg);
break;
case WDIOC_GETSTATUS:
ret = put_user(0, (int __user *)arg);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(ltq_wdt_timeout, (int __user *)arg);
if (!ret)
ltq_wdt_enable();
/* intentional drop through */
case WDIOC_GETTIMEOUT:
ret = put_user(ltq_wdt_timeout, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
ltq_wdt_enable();
ret = 0;
break;
}
return ret;
}
static int
ltq_wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &ltq_wdt_in_use))
return -EBUSY;
ltq_wdt_in_use = 1;
ltq_wdt_enable();
return nonseekable_open(inode, file);
}
static int
ltq_wdt_release(struct inode *inode, struct file *file)
{
if (ltq_wdt_ok_to_close)
ltq_wdt_disable();
else
pr_err("watchdog closed without warning\n");
ltq_wdt_ok_to_close = 0;
clear_bit(0, &ltq_wdt_in_use);
return 0;
}
static const struct file_operations ltq_wdt_fops = {
.owner = THIS_MODULE,
.write = ltq_wdt_write,
.unlocked_ioctl = ltq_wdt_ioctl,
.open = ltq_wdt_open,
.release = ltq_wdt_release,
.llseek = no_llseek,
};
static struct miscdevice ltq_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &ltq_wdt_fops,
};
static int
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct clk *clk;
ltq_wdt_membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ltq_wdt_membase))
return PTR_ERR(ltq_wdt_membase);
/* we do not need to enable the clock as it is always running */
clk = clk_get_io();
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Failed to get clock\n");
return -ENOENT;
}
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
/* find out if the watchdog caused the last reboot */
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
dev_info(&pdev->dev, "Init done\n");
return misc_register(&ltq_wdt_miscdev);
}
static int
ltq_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ltq_wdt_miscdev);
return 0;
}
static const struct of_device_id ltq_wdt_match[] = {
{ .compatible = "lantiq,wdt" },
{},
};
MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
.probe = ltq_wdt_probe,
.remove = ltq_wdt_remove,
.driver = {
.name = "wdt",
.of_match_table = ltq_wdt_match,
},
};
module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
| gpl-2.0 |
scotthartbti/android_kernel_samsung_trlte | arch/arm/mach-integrator/integrator_cp.c | 1786 | 13661 | /*
* linux/arch/arm/mach-integrator/integrator_cp.c
*
* Copyright (C) 2003 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/amba/kmi.h>
#include <linux/amba/clcd.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/gfp.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_data/clk-integrator.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/sys_soc.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst.h>
#include <mach/cm.h>
#include <mach/lm.h>
#include <mach/irqs.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/hardware/timer-sp.h>
#include <plat/clcd.h>
#include <plat/sched_clock.h>
#include "common.h"
/* Base address to the CP controller */
static void __iomem *intcp_con_base;
#define INTCP_PA_FLASH_BASE 0x24000000
#define INTCP_PA_CLCD_BASE 0xc0000000
#define INTCP_FLASHPROG 0x04
#define CINTEGRATOR_FLASHPROG_FLVPPEN (1 << 0)
#define CINTEGRATOR_FLASHPROG_FLWREN (1 << 1)
/*
* Logical Physical
* f1000000 10000000 Core module registers
* f1100000 11000000 System controller registers
* f1200000 12000000 EBI registers
* f1300000 13000000 Counter/Timer
* f1400000 14000000 Interrupt controller
* f1600000 16000000 UART 0
* f1700000 17000000 UART 1
* f1a00000 1a000000 Debug LEDs
* fc900000 c9000000 GPIO
* fca00000 ca000000 SIC
* fcb00000 cb000000 CP system control
*/
static struct map_desc intcp_io_desc[] __initdata __maybe_unused = {
{
.virtual = IO_ADDRESS(INTEGRATOR_HDR_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_HDR_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_EBI_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_EBI_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_CT_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_CT_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_IC_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_IC_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_UART0_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_UART0_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_DBG_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_DBG_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_CP_GPIO_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_CP_GPIO_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}, {
.virtual = IO_ADDRESS(INTEGRATOR_CP_SIC_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_CP_SIC_BASE),
.length = SZ_4K,
.type = MT_DEVICE
}
};
static void __init intcp_map_io(void)
{
iotable_init(intcp_io_desc, ARRAY_SIZE(intcp_io_desc));
}
/*
* Flash handling.
*/
static int intcp_flash_init(struct platform_device *dev)
{
u32 val;
val = readl(intcp_con_base + INTCP_FLASHPROG);
val |= CINTEGRATOR_FLASHPROG_FLWREN;
writel(val, intcp_con_base + INTCP_FLASHPROG);
return 0;
}
static void intcp_flash_exit(struct platform_device *dev)
{
u32 val;
val = readl(intcp_con_base + INTCP_FLASHPROG);
val &= ~(CINTEGRATOR_FLASHPROG_FLVPPEN|CINTEGRATOR_FLASHPROG_FLWREN);
writel(val, intcp_con_base + INTCP_FLASHPROG);
}
static void intcp_flash_set_vpp(struct platform_device *pdev, int on)
{
u32 val;
val = readl(intcp_con_base + INTCP_FLASHPROG);
if (on)
val |= CINTEGRATOR_FLASHPROG_FLVPPEN;
else
val &= ~CINTEGRATOR_FLASHPROG_FLVPPEN;
writel(val, intcp_con_base + INTCP_FLASHPROG);
}
static struct physmap_flash_data intcp_flash_data = {
.width = 4,
.init = intcp_flash_init,
.exit = intcp_flash_exit,
.set_vpp = intcp_flash_set_vpp,
};
/*
* It seems that the card insertion interrupt remains active after
* we've acknowledged it. We therefore ignore the interrupt, and
* rely on reading it from the SIC. This also means that we must
* clear the latched interrupt.
*/
static unsigned int mmc_status(struct device *dev)
{
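/*
 * bit 3 of the raw SIC status is the card-detect line; writing bit 3
 * back to the CP controller (offset 0x08) clears the latched interrupt
 * mentioned in the comment above
 */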
unsigned int status = readl(__io_address(0xca000000 + 4));
writel(8, intcp_con_base + 8);
return status & 8;
}
static struct mmci_platform_data mmc_data = {
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
.status = mmc_status,
.gpio_wp = -1,
.gpio_cd = -1,
};
/*
* CLCD support
*/
/*
* Ensure VGA is selected.
*/
static void cp_clcd_enable(struct clcd_fb *fb)
{
struct fb_var_screeninfo *var = &fb->fb.var;
u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
| CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
if (var->bits_per_pixel <= 8 ||
(var->bits_per_pixel == 16 && var->green.length == 5))
/* Pseudocolor, RGB555, BGR555 */
val |= CM_CTRL_LCDMUXSEL_VGA555_TFT555;
else if (fb->fb.var.bits_per_pixel <= 16)
/* truecolor RGB565 */
val |= CM_CTRL_LCDMUXSEL_VGA565_TFT555;
else
val = 0; /* no idea for this, don't trust the docs */
cm_control(CM_CTRL_LCDMUXSEL_MASK|
CM_CTRL_LCDEN0|
CM_CTRL_LCDEN1|
CM_CTRL_STATIC1|
CM_CTRL_STATIC2|
CM_CTRL_STATIC|
CM_CTRL_n24BITEN, val);
}
static int cp_clcd_setup(struct clcd_fb *fb)
{
fb->panel = versatile_clcd_get_panel("VGA");
if (!fb->panel)
return -EINVAL;
return versatile_clcd_setup_dma(fb, SZ_1M);
}
static struct clcd_board clcd_data = {
.name = "Integrator/CP",
.caps = CLCD_CAP_5551 | CLCD_CAP_RGB565 | CLCD_CAP_888,
.check = clcdfb_check,
.decode = clcdfb_decode,
.enable = cp_clcd_enable,
.setup = cp_clcd_setup,
.mmap = versatile_clcd_mmap_dma,
.remove = versatile_clcd_remove_dma,
};
#define REFCOUNTER (__io_address(INTEGRATOR_HDR_BASE) + 0x28)
static void __init intcp_init_early(void)
{
#ifdef CONFIG_PLAT_VERSATILE_SCHED_CLOCK
versatile_sched_clock_init(REFCOUNTER, 24000000);
#endif
}
#ifdef CONFIG_OF
static const struct of_device_id fpga_irq_of_match[] __initconst = {
{ .compatible = "arm,versatile-fpga-irq", .data = fpga_irq_of_init, },
{ /* Sentinel */ }
};
static void __init intcp_init_irq_of(void)
{
of_irq_init(fpga_irq_of_match);
integrator_clk_init(true);
}
/*
* For the Device Tree, add in the UART, MMC and CLCD specifics as AUXDATA
* and enforce the bus names since these are used for clock lookups.
*/
static struct of_dev_auxdata intcp_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_RTC_BASE,
"rtc", NULL),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART0_BASE,
"uart0", NULL),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_UART1_BASE,
"uart1", NULL),
OF_DEV_AUXDATA("arm,primecell", KMI0_BASE,
"kmi0", NULL),
OF_DEV_AUXDATA("arm,primecell", KMI1_BASE,
"kmi1", NULL),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_CP_MMC_BASE,
"mmci", &mmc_data),
OF_DEV_AUXDATA("arm,primecell", INTEGRATOR_CP_AACI_BASE,
"aaci", &mmc_data),
OF_DEV_AUXDATA("arm,primecell", INTCP_PA_CLCD_BASE,
"clcd", &clcd_data),
OF_DEV_AUXDATA("cfi-flash", INTCP_PA_FLASH_BASE,
"physmap-flash", &intcp_flash_data),
{ /* sentinel */ },
};
static void __init intcp_init_of(void)
{
struct device_node *root;
struct device_node *cpcon;
struct device *parent;
struct soc_device *soc_dev;
struct soc_device_attribute *soc_dev_attr;
u32 intcp_sc_id;
int err;
/* Here we create an SoC device for the root node */
root = of_find_node_by_path("/");
if (!root)
return;
cpcon = of_find_node_by_path("/cpcon");
if (!cpcon)
return;
intcp_con_base = of_iomap(cpcon, 0);
if (!intcp_con_base)
return;
intcp_sc_id = readl(intcp_con_base);
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return;
err = of_property_read_string(root, "compatible",
&soc_dev_attr->soc_id);
if (err)
return;
err = of_property_read_string(root, "model", &soc_dev_attr->machine);
if (err)
return;
soc_dev_attr->family = "Integrator";
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
'A' + (intcp_sc_id & 0x0f));
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->revision);
kfree(soc_dev_attr);
return;
}
parent = soc_device_to_device(soc_dev);
integrator_init_sysfs(parent, intcp_sc_id);
of_platform_populate(root, of_default_bus_match_table,
intcp_auxdata_lookup, parent);
}
static const char * intcp_dt_board_compat[] = {
"arm,integrator-cp",
NULL,
};
DT_MACHINE_START(INTEGRATOR_CP_DT, "ARM Integrator/CP (Device Tree)")
.reserve = integrator_reserve,
.map_io = intcp_map_io,
.init_early = intcp_init_early,
.init_irq = intcp_init_irq_of,
.handle_irq = fpga_handle_irq,
.init_machine = intcp_init_of,
.restart = integrator_restart,
.dt_compat = intcp_dt_board_compat,
MACHINE_END
#endif
#ifdef CONFIG_ATAGS
/*
* For the ATAG boot some static mappings are needed. This will
* go away with the ATAG support down the road.
*/
static struct map_desc intcp_io_desc_atag[] __initdata = {
{
.virtual = IO_ADDRESS(INTEGRATOR_CP_CTL_BASE),
.pfn = __phys_to_pfn(INTEGRATOR_CP_CTL_BASE),
.length = SZ_4K,
.type = MT_DEVICE
},
};
static void __init intcp_map_io_atag(void)
{
iotable_init(intcp_io_desc_atag, ARRAY_SIZE(intcp_io_desc_atag));
intcp_con_base = __io_address(INTEGRATOR_CP_CTL_BASE);
intcp_map_io();
}
/*
* This is where non-devicetree initialization code is collected and stashed
* for eventual deletion.
*/
#define INTCP_FLASH_SIZE SZ_32M
static struct resource intcp_flash_resource = {
.start = INTCP_PA_FLASH_BASE,
.end = INTCP_PA_FLASH_BASE + INTCP_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device intcp_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &intcp_flash_data,
},
.num_resources = 1,
.resource = &intcp_flash_resource,
};
#define INTCP_ETH_SIZE 0x10
static struct resource smc91x_resources[] = {
[0] = {
.start = INTEGRATOR_CP_ETH_BASE,
.end = INTEGRATOR_CP_ETH_BASE + INTCP_ETH_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_CP_ETHINT,
.end = IRQ_CP_ETHINT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
static struct platform_device *intcp_devs[] __initdata = {
&intcp_flash_device,
&smc91x_device,
};
#define INTCP_VA_CIC_BASE __io_address(INTEGRATOR_HDR_BASE + 0x40)
#define INTCP_VA_PIC_BASE __io_address(INTEGRATOR_IC_BASE)
#define INTCP_VA_SIC_BASE __io_address(INTEGRATOR_CP_SIC_BASE)
static void __init intcp_init_irq(void)
{
u32 pic_mask, cic_mask, sic_mask;
/* These masks are for the HW IRQ registers */
pic_mask = ~((~0u) << (11 - 0));
pic_mask |= (~((~0u) << (29 - 22))) << 22;
cic_mask = ~((~0u) << (1 + IRQ_CIC_END - IRQ_CIC_START));
sic_mask = ~((~0u) << (1 + IRQ_SIC_END - IRQ_SIC_START));
/*
* Disable all interrupt sources
*/
writel(0xffffffff, INTCP_VA_PIC_BASE + IRQ_ENABLE_CLEAR);
writel(0xffffffff, INTCP_VA_PIC_BASE + FIQ_ENABLE_CLEAR);
writel(0xffffffff, INTCP_VA_CIC_BASE + IRQ_ENABLE_CLEAR);
writel(0xffffffff, INTCP_VA_CIC_BASE + FIQ_ENABLE_CLEAR);
writel(sic_mask, INTCP_VA_SIC_BASE + IRQ_ENABLE_CLEAR);
writel(sic_mask, INTCP_VA_SIC_BASE + FIQ_ENABLE_CLEAR);
fpga_irq_init(INTCP_VA_PIC_BASE, "PIC", IRQ_PIC_START,
-1, pic_mask, NULL);
fpga_irq_init(INTCP_VA_CIC_BASE, "CIC", IRQ_CIC_START,
-1, cic_mask, NULL);
fpga_irq_init(INTCP_VA_SIC_BASE, "SIC", IRQ_SIC_START,
IRQ_CP_CPPLDINT, sic_mask, NULL);
integrator_clk_init(true);
}
#define TIMER0_VA_BASE __io_address(INTEGRATOR_TIMER0_BASE)
#define TIMER1_VA_BASE __io_address(INTEGRATOR_TIMER1_BASE)
#define TIMER2_VA_BASE __io_address(INTEGRATOR_TIMER2_BASE)
static void __init cp_timer_init(void)
{
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
writel(0, TIMER1_VA_BASE + TIMER_CTRL);
writel(0, TIMER2_VA_BASE + TIMER_CTRL);
sp804_clocksource_init(TIMER2_VA_BASE, "timer2");
sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1, "timer1");
}
#define INTEGRATOR_CP_MMC_IRQS { IRQ_CP_MMCIINT0, IRQ_CP_MMCIINT1 }
#define INTEGRATOR_CP_AACI_IRQS { IRQ_CP_AACIINT }
static AMBA_APB_DEVICE(mmc, "mmci", 0, INTEGRATOR_CP_MMC_BASE,
INTEGRATOR_CP_MMC_IRQS, &mmc_data);
static AMBA_APB_DEVICE(aaci, "aaci", 0, INTEGRATOR_CP_AACI_BASE,
INTEGRATOR_CP_AACI_IRQS, NULL);
static AMBA_AHB_DEVICE(clcd, "clcd", 0, INTCP_PA_CLCD_BASE,
{ IRQ_CP_CLCDCINT }, &clcd_data);
static struct amba_device *amba_devs[] __initdata = {
&mmc_device,
&aaci_device,
&clcd_device,
};
static void __init intcp_init(void)
{
int i;
platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
amba_device_register(d, &iomem_resource);
}
integrator_init(true);
}
MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.atag_offset = 0x100,
.reserve = integrator_reserve,
.map_io = intcp_map_io_atag,
.init_early = intcp_init_early,
.init_irq = intcp_init_irq,
.handle_irq = fpga_handle_irq,
.init_time = cp_timer_init,
.init_machine = intcp_init,
.restart = integrator_restart,
MACHINE_END
#endif
| gpl-2.0 |
Gandi/ktrill | drivers/regulator/pcf50633-regulator.c | 2042 | 3972 | /* NXP PCF50633 PMIC Driver
*
* (C) 2006-2008 by Openmoko, Inc.
* Author: Balaji Rao <balajirrao@openmoko.org>
* All rights reserved.
*
* Broken down from monstrous PCF50633 driver mainly by
* Harald Welte and Andy Green and Werner Almesberger
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/pmic.h>
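/*
 * Build a regulator_desc for one linear-range regulator; the enable bit
 * lives in the register immediately following the voltage-select (OUT)
 * register.
 */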
#define PCF50633_REGULATOR(_name, _id, _min_uV, _uV_step, _min_sel, _n) \
{ \
.name = _name, \
.id = PCF50633_REGULATOR_##_id, \
.ops = &pcf50633_regulator_ops, \
.n_voltages = _n, \
.min_uV = _min_uV, \
.uV_step = _uV_step, \
.linear_min_sel = _min_sel, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.vsel_reg = PCF50633_REG_##_id##OUT, \
.vsel_mask = 0xff, \
.enable_reg = PCF50633_REG_##_id##OUT + 1, \
.enable_mask = PCF50633_REGULATOR_ON, \
}
static struct regulator_ops pcf50633_regulator_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
static const struct regulator_desc regulators[] = {
[PCF50633_REGULATOR_AUTO] =
PCF50633_REGULATOR("auto", AUTO, 1800000, 25000, 0x2f, 128),
[PCF50633_REGULATOR_DOWN1] =
PCF50633_REGULATOR("down1", DOWN1, 625000, 25000, 0, 96),
[PCF50633_REGULATOR_DOWN2] =
PCF50633_REGULATOR("down2", DOWN2, 625000, 25000, 0, 96),
[PCF50633_REGULATOR_LDO1] =
PCF50633_REGULATOR("ldo1", LDO1, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO2] =
PCF50633_REGULATOR("ldo2", LDO2, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO3] =
PCF50633_REGULATOR("ldo3", LDO3, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO4] =
PCF50633_REGULATOR("ldo4", LDO4, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO5] =
PCF50633_REGULATOR("ldo5", LDO5, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_LDO6] =
PCF50633_REGULATOR("ldo6", LDO6, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_HCLDO] =
PCF50633_REGULATOR("hcldo", HCLDO, 900000, 100000, 0, 28),
[PCF50633_REGULATOR_MEMLDO] =
PCF50633_REGULATOR("memldo", MEMLDO, 900000, 100000, 0, 28),
};
static int pcf50633_regulator_probe(struct platform_device *pdev)
{
struct regulator_dev *rdev;
struct pcf50633 *pcf;
struct regulator_config config = { };
/* Already set by core driver */
pcf = dev_to_pcf50633(pdev->dev.parent);
config.dev = &pdev->dev;
config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcf;
config.regmap = pcf->regmap;
rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
&config);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
platform_set_drvdata(pdev, rdev);
if (pcf->pdata->regulator_registered)
pcf->pdata->regulator_registered(pcf, pdev->id);
return 0;
}
static struct platform_driver pcf50633_regulator_driver = {
.driver = {
.name = "pcf50633-regulator",
},
.probe = pcf50633_regulator_probe,
};
static int __init pcf50633_regulator_init(void)
{
return platform_driver_register(&pcf50633_regulator_driver);
}
subsys_initcall(pcf50633_regulator_init);
static void __exit pcf50633_regulator_exit(void)
{
platform_driver_unregister(&pcf50633_regulator_driver);
}
module_exit(pcf50633_regulator_exit);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcf50633-regulator");
| gpl-2.0 |
desaishivam26/android_kernel_motorola_msm8916 | kernel/debug/kdb/kdb_bt.c | 2810 | 5297 | /*
* Kernel Debugger Architecture Independent Stack Traceback
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
*/
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include "kdb_private.h"
static void kdb_show_stack(struct task_struct *p, void *addr)
{
int old_lvl = console_loglevel;
console_loglevel = 15;
kdb_trap_printk++;
kdb_set_current_task(p);
if (addr) {
show_stack((struct task_struct *)p, addr);
} else if (kdb_current_regs) {
#ifdef CONFIG_X86
show_stack(p, &kdb_current_regs->sp);
#else
show_stack(p, NULL);
#endif
} else {
show_stack(p, NULL);
}
console_loglevel = old_lvl;
kdb_trap_printk--;
}
/*
* kdb_bt
*
* This function implements the 'bt' command. Print a stack
* traceback.
*
* bt [<address-expression>] (addr-exp is for alternate stacks)
* btp <pid> Kernel stack for <pid>
* btt <address-expression> Kernel stack for task structure at
* <address-expression>
* bta [DRSTCZEUIMA] All useful processes, optionally
* filtered by state
* btc [<cpu>] The current process on one cpu,
* default is all cpus
*
* bt <address-expression> refers to an address on the stack; that location
* is assumed to contain a return address.
*
* btt <address-expression> refers to the address of a struct task.
*
* Inputs:
* argc argument count
* argv argument vector
* Outputs:
* None.
* Returns:
* zero for success, a kdb diagnostic if error
* Locking:
* none.
* Remarks:
* Backtrack works best when the code uses frame pointers. But even
* without frame pointers we should get a reasonable trace.
*
* mds comes in handy when examining the stack to do a manual traceback or
* to get a starting point for bt <address-expression>.
*/
static int
kdb_bt1(struct task_struct *p, unsigned long mask,
int argcount, int btaprompt)
{
char buffer[2];
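/* probe the first and last byte of the task_struct so we know the
 * address is actually mapped before using it */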
if (kdb_getarea(buffer[0], (unsigned long)p) ||
kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
return KDB_BADADDR;
if (!kdb_task_state(p, mask))
return 0;
kdb_printf("Stack traceback for pid %d\n", p->pid);
kdb_ps1(p);
kdb_show_stack(p, NULL);
if (btaprompt) {
kdb_getstr(buffer, sizeof(buffer),
"Enter <q> to end, <cr> to continue:");
if (buffer[0] == 'q') {
kdb_printf("\n");
return 1;
}
}
touch_nmi_watchdog();
return 0;
}
int
kdb_bt(int argc, const char **argv)
{
int diag;
int argcount = 5;
int btaprompt = 1;
int nextarg;
unsigned long addr;
long offset;
/* Prompt after each proc in bta */
kdbgetintenv("BTAPROMPT", &btaprompt);
if (strcmp(argv[0], "bta") == 0) {
struct task_struct *g, *p;
unsigned long cpu;
unsigned long mask = kdb_task_state_string(argc ? argv[1] :
NULL);
if (argc == 0)
kdb_ps_suppressed();
/* Run the active tasks first */
for_each_online_cpu(cpu) {
p = kdb_curr_task(cpu);
if (kdb_bt1(p, mask, argcount, btaprompt))
return 0;
}
/* Now the inactive tasks */
kdb_do_each_thread(g, p) {
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
if (task_curr(p))
continue;
if (kdb_bt1(p, mask, argcount, btaprompt))
return 0;
} kdb_while_each_thread(g, p);
} else if (strcmp(argv[0], "btp") == 0) {
struct task_struct *p;
unsigned long pid;
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg((char *)argv[1], &pid);
if (diag)
return diag;
p = find_task_by_pid_ns(pid, &init_pid_ns);
if (p) {
kdb_set_current_task(p);
return kdb_bt1(p, ~0UL, argcount, 0);
}
kdb_printf("No process with pid == %ld found\n", pid);
return 0;
} else if (strcmp(argv[0], "btt") == 0) {
if (argc != 1)
return KDB_ARGCOUNT;
diag = kdbgetularg((char *)argv[1], &addr);
if (diag)
return diag;
kdb_set_current_task((struct task_struct *)addr);
return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
} else if (strcmp(argv[0], "btc") == 0) {
unsigned long cpu = ~0;
struct task_struct *save_current_task = kdb_current_task;
char buf[80];
if (argc > 1)
return KDB_ARGCOUNT;
if (argc == 1) {
diag = kdbgetularg((char *)argv[1], &cpu);
if (diag)
return diag;
}
/* Recursive use of kdb_parse, do not use argv after
* this point */
argv = NULL;
if (cpu != ~0) {
if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
kdb_printf("no process for cpu %ld\n", cpu);
return 0;
}
sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
kdb_parse(buf);
return 0;
}
kdb_printf("btc: cpu status: ");
kdb_parse("cpu\n");
for_each_online_cpu(cpu) {
sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
kdb_parse(buf);
touch_nmi_watchdog();
}
kdb_set_current_task(save_current_task);
return 0;
} else {
if (argc) {
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
&offset, NULL);
if (diag)
return diag;
kdb_show_stack(kdb_current_task, (void *)addr);
return 0;
} else {
return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
}
}
/* NOTREACHED */
return 0;
}
| gpl-2.0 |
shaumux/semc-kernel-qsd8k-jb | crypto/algapi.c | 3066 | 19443 | /*
* Cryptographic API for algorithms (i.e., low-level API).
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
static void crypto_remove_final(struct list_head *list);
static LIST_HEAD(crypto_template_list);
void crypto_larval_error(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
alg = crypto_alg_lookup(name, type, mask);
if (alg) {
if (crypto_is_larval(alg)) {
struct crypto_larval *larval = (void *)alg;
complete_all(&larval->completion);
}
crypto_mod_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_larval_error);
static inline int crypto_set_driver_name(struct crypto_alg *alg)
{
static const char suffix[] = "-generic";
char *driver_name = alg->cra_driver_name;
int len;
if (*driver_name)
return 0;
len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
memcpy(driver_name + len, suffix, sizeof(suffix));
return 0;
}
static int crypto_check_alg(struct crypto_alg *alg)
{
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
if (alg->cra_blocksize > PAGE_SIZE / 8)
return -EINVAL;
if (alg->cra_priority < 0)
return -EINVAL;
return crypto_set_driver_name(alg);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
{
struct crypto_instance *inst = (void *)alg;
struct crypto_template *tmpl = inst->tmpl;
tmpl->free(inst);
crypto_tmpl_put(tmpl);
}
static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
struct list_head *stack,
struct list_head *top,
struct list_head *secondary_spawns)
{
struct crypto_spawn *spawn, *n;
if (list_empty(stack))
return NULL;
spawn = list_first_entry(stack, struct crypto_spawn, list);
n = list_entry(spawn->list.next, struct crypto_spawn, list);
if (spawn->alg && &n->list != stack && !n->alg)
n->alg = (n->list.next == stack) ? alg :
&list_entry(n->list.next, struct crypto_spawn,
list)->inst->alg;
list_move(&spawn->list, secondary_spawns);
return &n->list == stack ? top : &n->inst->alg.cra_users;
}
static void crypto_remove_spawn(struct crypto_spawn *spawn,
struct list_head *list)
{
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
if (crypto_is_dead(&inst->alg))
return;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
if (hlist_unhashed(&inst->list))
return;
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
BUG_ON(!list_empty(&inst->alg.cra_users));
}
static void crypto_remove_spawns(struct crypto_alg *alg,
struct list_head *list,
struct crypto_alg *nalg)
{
u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
struct list_head *spawns;
LIST_HEAD(stack);
LIST_HEAD(top);
spawns = &alg->cra_users;
list_for_each_entry_safe(spawn, n, spawns, list) {
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
list_move(&spawn->list, &top);
}
spawns = &top;
do {
while (!list_empty(spawns)) {
struct crypto_instance *inst;
spawn = list_first_entry(spawns, struct crypto_spawn,
list);
inst = spawn->inst;
BUG_ON(&inst->alg == alg);
list_move(&spawn->list, &stack);
if (&inst->alg == nalg)
break;
spawn->alg = NULL;
spawns = &inst->alg.cra_users;
}
} while ((spawns = crypto_more_spawns(alg, &stack, &top,
&secondary_spawns)));
list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
if (spawn->alg)
list_move(&spawn->list, &spawn->alg->cra_users);
else
crypto_remove_spawn(spawn, list);
}
}
static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_alg *q;
struct crypto_larval *larval;
int ret = -EAGAIN;
if (crypto_is_dead(alg))
goto err;
INIT_LIST_HEAD(&alg->cra_users);
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
ret = -EEXIST;
atomic_set(&alg->cra_refcnt, 1);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
goto err;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
goto err;
continue;
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}
larval = crypto_larval_alloc(alg->cra_name,
alg->cra_flags | CRYPTO_ALG_TESTED, 0);
if (IS_ERR(larval))
goto out;
ret = -ENOENT;
larval->adult = crypto_mod_get(alg);
if (!larval->adult)
goto free_larval;
atomic_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
list_add(&alg->cra_list, &crypto_alg_list);
list_add(&larval->alg.cra_list, &crypto_alg_list);
out:
return larval;
free_larval:
kfree(larval);
err:
larval = ERR_PTR(ret);
goto out;
}
void crypto_alg_tested(const char *name, int err)
{
struct crypto_larval *test;
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
test = (struct crypto_larval *)q;
if (!strcmp(q->cra_driver_name, name))
goto found;
}
printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err);
goto unlock;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (err || list_empty(&alg->cra_list))
goto complete;
alg->cra_flags |= CRYPTO_ALG_TESTED;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
continue;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
struct crypto_larval *larval = (void *)q;
/*
* Check to see if either our generic name or
* specific name can satisfy the name requested
* by the larval entry q.
*/
if (strcmp(alg->cra_name, q->cra_name) &&
strcmp(alg->cra_driver_name, q->cra_name))
continue;
if (larval->adult)
continue;
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
continue;
if (!crypto_mod_get(alg))
continue;
larval->adult = alg;
complete_all(&larval->completion);
continue;
}
if (strcmp(alg->cra_name, q->cra_name))
continue;
if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
q->cra_priority > alg->cra_priority)
continue;
crypto_remove_spawns(q, &list, alg);
}
complete:
complete_all(&test->completion);
unlock:
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
static void crypto_remove_final(struct list_head *list)
{
struct crypto_alg *alg;
struct crypto_alg *n;
list_for_each_entry_safe(alg, n, list, cra_list) {
list_del_init(&alg->cra_list);
crypto_alg_put(alg);
}
}
static void crypto_wait_for_test(struct crypto_larval *larval)
{
int err;
err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
if (err != NOTIFY_STOP) {
if (WARN_ON(err != NOTIFY_DONE))
goto out;
crypto_alg_tested(larval->alg.cra_driver_name, 0);
}
err = wait_for_completion_interruptible(&larval->completion);
WARN_ON(err);
out:
crypto_larval_kill(&larval->alg);
}
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
int err;
err = crypto_check_alg(alg);
if (err)
return err;
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg);
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
crypto_wait_for_test(larval);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
{
if (unlikely(list_empty(&alg->cra_list)))
return -ENOENT;
alg->cra_flags |= CRYPTO_ALG_DEAD;
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
return 0;
}
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
if (ret)
return ret;
BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
int crypto_register_template(struct crypto_template *tmpl)
{
struct crypto_template *q;
int err = -EEXIST;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_template_list, list) {
if (q == tmpl)
goto out;
}
list_add(&tmpl->list, &crypto_template_list);
crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_template);
void crypto_unregister_template(struct crypto_template *tmpl)
{
struct crypto_instance *inst;
struct hlist_node *p, *n;
struct hlist_head *list;
LIST_HEAD(users);
down_write(&crypto_alg_sem);
BUG_ON(list_empty(&tmpl->list));
list_del_init(&tmpl->list);
list = &tmpl->instances;
hlist_for_each_entry(inst, p, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
BUG_ON(err);
}
crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, p, n, list, list) {
BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
tmpl->free(inst);
}
crypto_remove_final(&users);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
static struct crypto_template *__crypto_lookup_template(const char *name)
{
struct crypto_template *q, *tmpl = NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_template_list, list) {
if (strcmp(q->name, name))
continue;
if (unlikely(!crypto_tmpl_get(q)))
continue;
tmpl = q;
break;
}
up_read(&crypto_alg_sem);
return tmpl;
}
struct crypto_template *crypto_lookup_template(const char *name)
{
return try_then_request_module(__crypto_lookup_template(name), name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst)
{
struct crypto_larval *larval;
int err;
err = crypto_check_alg(&inst->alg);
if (err)
goto err;
inst->alg.cra_module = tmpl->module;
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(&inst->alg);
if (IS_ERR(larval))
goto unlock;
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
unlock:
up_write(&crypto_alg_sem);
err = PTR_ERR(larval);
if (IS_ERR(larval))
goto err;
crypto_wait_for_test(larval);
err = 0;
err:
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst, u32 mask)
{
int err = -EAGAIN;
spawn->inst = inst;
spawn->mask = mask;
down_write(&crypto_alg_sem);
if (!crypto_is_moribund(alg)) {
list_add(&spawn->list, &alg->cra_users);
spawn->alg = alg;
err = 0;
}
up_write(&crypto_alg_sem);
return err;
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst,
const struct crypto_type *frontend)
{
int err = -EINVAL;
if ((alg->cra_flags ^ frontend->type) & frontend->maskset)
goto out;
spawn->frontend = frontend;
err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
out:
return err;
}
EXPORT_SYMBOL_GPL(crypto_init_spawn2);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
if (!spawn->alg)
return;
down_write(&crypto_alg_sem);
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
down_read(&crypto_alg_sem);
alg = spawn->alg;
alg2 = alg;
if (alg2)
alg2 = crypto_mod_get(alg2);
up_read(&crypto_alg_sem);
if (!alg2) {
if (alg)
crypto_shoot_alg(alg);
return ERR_PTR(-EAGAIN);
}
return alg;
}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
tfm = __crypto_alloc_tfm(alg, type, mask);
if (IS_ERR(tfm))
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
tfm = crypto_create_tfm(alg, spawn->frontend);
if (IS_ERR(tfm))
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_register_notifier);
int crypto_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
{
struct rtattr *rta = tb[0];
struct crypto_attr_type *algt;
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*algt))
return ERR_PTR(-EINVAL);
if (rta->rta_type != CRYPTOA_TYPE)
return ERR_PTR(-EINVAL);
algt = RTA_DATA(rta);
return algt;
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);
int crypto_check_attr_type(struct rtattr **tb, u32 type)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ type) & algt->mask)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
const char *crypto_attr_alg_name(struct rtattr *rta)
{
struct crypto_attr_alg *alga;
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*alga))
return ERR_PTR(-EINVAL);
if (rta->rta_type != CRYPTOA_ALG)
return ERR_PTR(-EINVAL);
alga = RTA_DATA(rta);
alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
return alga->name;
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
const struct crypto_type *frontend,
u32 type, u32 mask)
{
const char *name;
int err;
name = crypto_attr_alg_name(rta);
err = PTR_ERR(name);
if (IS_ERR(name))
return ERR_PTR(err);
return crypto_find_alg(name, frontend, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_attr_alg2);
int crypto_attr_u32(struct rtattr *rta, u32 *num)
{
struct crypto_attr_u32 *nu32;
if (!rta)
return -ENOENT;
if (RTA_PAYLOAD(rta) < sizeof(*nu32))
return -EINVAL;
if (rta->rta_type != CRYPTOA_U32)
return -EINVAL;
nu32 = RTA_DATA(rta);
*num = nu32->num;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_attr_u32);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
unsigned int head)
{
struct crypto_instance *inst;
char *p;
int err;
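/* the caller's private 'head' area is laid out in front of the
 * crypto_instance, which is in turn followed by one crypto_spawn */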
p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
inst = (void *)(p + head);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
return p;
err_free_inst:
kfree(p);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg)
{
struct crypto_instance *inst;
struct crypto_spawn *spawn;
int err;
inst = crypto_alloc_instance2(name, alg, 0);
if (IS_ERR(inst))
goto out;
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
return inst;
err_free_inst:
kfree(inst);
inst = ERR_PTR(err);
out:
return inst;
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
INIT_LIST_HEAD(&queue->list);
queue->backlog = &queue->list;
queue->qlen = 0;
queue->max_qlen = max_qlen;
}
EXPORT_SYMBOL_GPL(crypto_init_queue);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request)
{
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
err = -EBUSY;
if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
goto out;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
queue->qlen++;
list_add_tail(&request->list, &queue->list);
out:
return err;
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
{
struct list_head *request;
if (unlikely(!queue->qlen))
return NULL;
queue->qlen--;
if (queue->backlog != &queue->list)
queue->backlog = queue->backlog->next;
request = queue->list.next;
list_del(request);
return (char *)list_entry(request, struct crypto_async_request, list) -
offset;
}
EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
return __crypto_dequeue_request(queue, 0);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
{
struct crypto_async_request *req;
list_for_each_entry(req, &queue->list, list) {
if (req->tfm == tfm)
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
static inline void crypto_inc_byte(u8 *a, unsigned int size)
{
u8 *b = (a + size);
u8 c;
for (; size; size--) {
c = *--b + 1;
*b = c;
if (c)
break;
}
}
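/*
 * Increment a big-endian counter of 'size' bytes: walk 32-bit words from
 * the least-significant end and let crypto_inc_byte() handle any leading
 * bytes the carry may propagate into.
 */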
void crypto_inc(u8 *a, unsigned int size)
{
__be32 *b = (__be32 *)(a + size);
u32 c;
for (; size >= 4; size -= 4) {
c = be32_to_cpu(*--b) + 1;
*b = cpu_to_be32(c);
if (c)
return;
}
crypto_inc_byte(a, size);
}
EXPORT_SYMBOL_GPL(crypto_inc);
static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
{
for (; size; size--)
*a++ ^= *b++;
}
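/* XOR src into dst a 32-bit word at a time, then finish the tail bytes */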
void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
u32 *a = (u32 *)dst;
u32 *b = (u32 *)src;
for (; size >= 4; size -= 4)
*a++ ^= *b++;
crypto_xor_byte((u8 *)a, (u8 *)b, size);
}
EXPORT_SYMBOL_GPL(crypto_xor);
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
return 0;
}
static void __exit crypto_algapi_exit(void)
{
crypto_exit_proc();
}
module_init(crypto_algapi_init);
module_exit(crypto_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
| gpl-2.0 |
S3neos/android_kernel_samsung_s3ve3g | drivers/atm/iphase.c | 4858 | 110639 | /******************************************************************************
iphase.c: Device driver for Interphase ATM PCI adapter cards
Author: Peter Wang <pwang@iphase.com>
Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Interphase Corporation <www.iphase.com>
Version: 1.0
*******************************************************************************
This software may be used and distributed according to the terms
of the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on this skeleton fall under the GPL and must retain
the authorship (implicit copyright) notice.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
was originally written by Monalisa Agrawal at UNH. Now this driver
supports a variety of variants of Interphase ATM PCI (i)Chip adapter
card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
in terms of PHY type, the size of control memory and the size of
packet memory. The following is the change log and history:
Fix bugs in Mona's UBR driver.
Modify the basic memory allocation and dma logic.
Port the driver to the latest kernel from 2.0.46.
Complete the ABR logic of the driver, and add the ABR work-
around for the hardware anomalies.
Add the CBR support.
Add the flow control logic to the driver to allow rate-limit VC.
Add 4K VC support to the board with 512K control memory.
Add the support of all the variants of the Interphase ATM PCI
(i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
(25M UTP25) and x531 (DS3 and E3).
Add SMP support.
Support and updates available at: ftp://ftp.iphase.com/pub/atm
*******************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
static void desc_dbg(IADEV *iadev);
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(unsigned long arg);
static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
|IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
module_param(IA_TX_BUF, int, 0);
module_param(IA_TX_BUF_SZ, int, 0);
module_param(IA_RX_BUF, int, 0);
module_param(IA_RX_BUF_SZ, int, 0);
module_param(IADebugFlag, uint, 0644);
MODULE_LICENSE("GPL");
/**************************** IA_LIB **********************************/
static void ia_init_rtn_q (IARTN_Q *que)
{
que->next = NULL;
que->tail = NULL;
}
static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
data->next = NULL;
if (que->next == NULL)
que->next = que->tail = data;
else {
data->next = que->next;
que->next = data;
}
return;
}
static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) return -1;
entry->data = data;
entry->next = NULL;
if (que->next == NULL)
que->next = que->tail = entry;
else {
que->tail->next = entry;
que->tail = que->tail->next;
}
return 1;
}
static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
IARTN_Q *tmpdata;
if (que->next == NULL)
return NULL;
tmpdata = que->next;
if ( que->next == que->tail)
que->next = que->tail = NULL;
else
que->next = que->next->next;
return tmpdata;
}
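/*
 * Walk the transmit completion queue (TCQ) between the host read pointer
 * and the adapter's write pointer, releasing descriptor-table entries the
 * hardware has finished with and queueing completed skbs of rate-limited
 * VCs on the tx return queue.
 */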
static void ia_hack_tcq(IADEV *dev) {
u_short desc1;
u_short tcq_wr;
struct ia_vcc *iavcc_r = NULL;
tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
while (dev->host_tcq_wr != tcq_wr) {
desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
if (!desc1) ;
else if (!dev->desc_tbl[desc1 -1].timestamp) {
IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
*(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
}
else if (dev->desc_tbl[desc1 -1].timestamp) {
if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
printk("IA: Fatal err in get_desc\n");
continue;
}
iavcc_r->vc_desc_cnt--;
dev->desc_tbl[desc1 -1].timestamp = 0;
IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
dev->desc_tbl[desc1 -1].txskb, desc1);)
if (iavcc_r->pcr < dev->rate_limit) {
IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
printk("ia_hack_tcq: No memory available\n");
}
dev->desc_tbl[desc1 -1].iavcc = NULL;
dev->desc_tbl[desc1 -1].txskb = NULL;
}
dev->host_tcq_wr += 2;
if (dev->host_tcq_wr > dev->ffL.tcq_ed)
dev->host_tcq_wr = dev->ffL.tcq_st;
}
} /* ia_hack_tcq */
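/*
 * Hand out the next free transmit descriptor from the TCQ. Roughly every
 * 50 jiffies, or when the TCQ looks empty, descriptors whose VC timeout
 * has expired are recovered first and pushed back onto the TCQ.
 */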
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
u_short desc_num, i;
struct sk_buff *skb;
struct ia_vcc *iavcc_r = NULL;
unsigned long delta;
static unsigned long timer = 0;
int ltimeout;
ia_hack_tcq (dev);
if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
timer = jiffies;
i=0;
while (i < dev->num_tx_desc) {
if (!dev->desc_tbl[i].timestamp) {
i++;
continue;
}
ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
delta = jiffies - dev->desc_tbl[i].timestamp;
if (delta >= ltimeout) {
IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
dev->ffL.tcq_rd = dev->ffL.tcq_ed;
else
dev->ffL.tcq_rd -= 2;
*(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
if (!(skb = dev->desc_tbl[i].txskb) ||
!(iavcc_r = dev->desc_tbl[i].iavcc))
printk("Fatal err, desc table vcc or skb is NULL\n");
else
iavcc_r->vc_desc_cnt--;
dev->desc_tbl[i].timestamp = 0;
dev->desc_tbl[i].iavcc = NULL;
dev->desc_tbl[i].txskb = NULL;
}
i++;
} /* while */
}
if (dev->ffL.tcq_rd == dev->host_tcq_wr)
return 0xFFFF;
/* Get the next available descriptor number from TCQ */
desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
dev->ffL.tcq_rd += 2;
if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
dev->ffL.tcq_rd = dev->ffL.tcq_st;
if (dev->ffL.tcq_rd == dev->host_tcq_wr)
return 0xFFFF;
desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
}
/* get system time */
dev->desc_tbl[desc_num -1].timestamp = jiffies;
return desc_num;
}
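/*
 * Work around a hardware ABR lockup: if an ABR VC still looks stuck after
 * several polls, force its state back to idle and re-insert the VCI into
 * the ABR schedule table.
 */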
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
u_char foundLockUp;
vcstatus_t *vcstatus;
u_short *shd_tbl;
u_short tempCellSlot, tempFract;
struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
u_int i;
if (vcc->qos.txtp.traffic_class == ATM_ABR) {
vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
vcstatus->cnt++;
foundLockUp = 0;
if( vcstatus->cnt == 0x05 ) {
abr_vc += vcc->vci;
eabr_vc += vcc->vci;
if( eabr_vc->last_desc ) {
if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
/* Wait for 10 Micro sec */
udelay(10);
if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
foundLockUp = 1;
}
else {
tempCellSlot = abr_vc->last_cell_slot;
tempFract = abr_vc->fraction;
if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
&& (tempFract == dev->testTable[vcc->vci]->fract))
foundLockUp = 1;
dev->testTable[vcc->vci]->lastTime = tempCellSlot;
dev->testTable[vcc->vci]->fract = tempFract;
}
} /* last descriptor */
vcstatus->cnt = 0;
} /* vcstatus->cnt */
if (foundLockUp) {
IF_ABR(printk("LOCK UP found\n");)
writew(0xFFFD, dev->seg_reg+MODE_REG_0);
/* Wait for 10 Micro sec */
udelay(10);
abr_vc->status &= 0xFFF8;
abr_vc->status |= 0x0001; /* state is idle */
shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
if (i < dev->num_vc)
shd_tbl[i] = vcc->vci;
else
IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
vcstatus->cnt = 0;
} /* foundLockUp */
} /* if an ABR VC */
}
/*
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
**
** +----+----+------------------+-------------------------------+
** | R | NZ | 5-bit exponent | 9-bit mantissa |
** +----+----+------------------+-------------------------------+
**
** R = reserved (written as 0)
** NZ = 0 if 0 cells/sec; 1 otherwise
**
** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
*/
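/*
** Worked example (illustrative only): a rate of about 353207 cells/sec has
** its most significant bit at position 18, so the encoding below yields
** NZ | (18 << 9) | ((353207 >> 9) & 0x1ff) = 0x64b1,
** i.e. 1.<177/512> x 2^18 ~= 352768 cells/sec after mantissa truncation.
*/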
static u16
cellrate_to_float(u32 cr)
{
#define NZ 0x4000
#define M_BITS 9 /* Number of bits in mantissa */
#define E_BITS 5 /* Number of bits in exponent */
#define M_MASK 0x1ff
#define E_MASK 0x1f
u16 flot;
u32 tmp = cr & 0x00ffffff;
int i = 0;
if (cr == 0)
return 0;
while (tmp != 1) {
tmp >>= 1;
i++;
}
if (i == M_BITS)
flot = NZ | (i << M_BITS) | (cr & M_MASK);
else if (i < M_BITS)
flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
else
flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
return flot;
}
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
static u32
float_to_cellrate(u16 rate)
{
u32 exp, mantissa, cps;
if ((rate & NZ) == 0)
return 0;
exp = (rate >> M_BITS) & E_MASK;
mantissa = rate & M_MASK;
if (exp == 0)
return 1;
cps = (1 << M_BITS) | mantissa;
if (exp == M_BITS)
cps = cps;
else if (exp > M_BITS)
cps <<= (exp - M_BITS);
else
cps >>= (M_BITS - exp);
return cps;
}
#endif
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
srv_p->class_type = ATM_ABR;
srv_p->pcr = dev->LineRate;
srv_p->mcr = 0;
srv_p->icr = 0x055cb7;
srv_p->tbe = 0xffffff;
srv_p->frtt = 0x3a;
srv_p->rif = 0xf;
srv_p->rdf = 0xb;
srv_p->nrm = 0x4;
srv_p->trm = 0x7;
srv_p->cdf = 0x3;
srv_p->adtf = 50;
}
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
struct atm_vcc *vcc, u8 flag)
{
f_vc_abr_entry *f_abr_vc;
r_vc_abr_entry *r_abr_vc;
u32 icr;
u8 trm, nrm, crm;
u16 adtf, air, *ptr16;
f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
f_abr_vc += vcc->vci;
switch (flag) {
case 1: /* FFRED initialization */
#if 0 /* sanity check */
if (srv_p->pcr == 0)
return INVALID_PCR;
if (srv_p->pcr > dev->LineRate)
srv_p->pcr = dev->LineRate;
if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
return MCR_UNAVAILABLE;
if (srv_p->mcr > srv_p->pcr)
return INVALID_MCR;
if (!(srv_p->icr))
srv_p->icr = srv_p->pcr;
if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
return INVALID_ICR;
if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
return INVALID_TBE;
if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
return INVALID_FRTT;
if (srv_p->nrm > MAX_NRM)
return INVALID_NRM;
if (srv_p->trm > MAX_TRM)
return INVALID_TRM;
if (srv_p->adtf > MAX_ADTF)
return INVALID_ADTF;
else if (srv_p->adtf == 0)
srv_p->adtf = 1;
if (srv_p->cdf > MAX_CDF)
return INVALID_CDF;
if (srv_p->rif > MAX_RIF)
return INVALID_RIF;
if (srv_p->rdf > MAX_RDF)
return INVALID_RDF;
#endif
memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
f_abr_vc->f_vc_type = ABR;
nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */
/* i.e 2**n = 2 << (n-1) */
f_abr_vc->f_nrm = nrm << 8 | nrm;
trm = 100000/(2 << (16 - srv_p->trm));
if ( trm == 0) trm = 1;
f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
crm = srv_p->tbe / nrm;
if (crm == 0) crm = 1;
f_abr_vc->f_crm = crm & 0xff;
f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
((srv_p->tbe/srv_p->frtt)*1000000) :
(1000000/(srv_p->frtt/srv_p->tbe)));
f_abr_vc->f_icr = cellrate_to_float(icr);
adtf = (10000 * srv_p->adtf)/8192;
if (adtf == 0) adtf = 1;
f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
f_abr_vc->f_acr = f_abr_vc->f_icr;
f_abr_vc->f_status = 0x0042;
break;
case 0: /* RFRED initialization */
ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
*(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
r_abr_vc += vcc->vci;
r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
air = srv_p->pcr << (15 - srv_p->rif);
if (air == 0) air = 1;
r_abr_vc->r_air = cellrate_to_float(air);
dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
dev->sum_mcr += srv_p->mcr;
dev->n_abr++;
break;
default:
break;
}
return 0;
}
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
u32 rateLow=0, rateHigh, rate;
int entries;
struct ia_vcc *ia_vcc;
int idealSlot =0, testSlot, toBeAssigned, inc;
u32 spacing;
u16 *SchedTbl, *TstSchedTbl;
u16 cbrVC, vcIndex;
u32 fracSlot = 0;
u32 sp_mod = 0;
u32 sp_mod2 = 0;
/* IpAdjustTrafficParams */
if (vcc->qos.txtp.max_pcr <= 0) {
IF_ERR(printk("PCR for CBR not defined\n");)
return -1;
}
rate = vcc->qos.txtp.max_pcr;
entries = rate / dev->Granularity;
IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
entries, rate, dev->Granularity);)
if (entries < 1)
IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
rateLow = entries * dev->Granularity;
rateHigh = (entries + 1) * dev->Granularity;
if (3*(rate - rateLow) > (rateHigh - rate))
entries++;
if (entries > dev->CbrRemEntries) {
IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
entries, dev->CbrRemEntries);)
return -EBUSY;
}
ia_vcc = INPH_IA_VCC(vcc);
ia_vcc->NumCbrEntry = entries;
dev->sum_mcr += entries * dev->Granularity;
/* IaFFrednInsertCbrSched */
// Starting at an arbitrary location, place the entries into the table
// as smoothly as possible
cbrVC = 0;
spacing = dev->CbrTotEntries / entries;
sp_mod = dev->CbrTotEntries % entries; // get modulo
toBeAssigned = entries;
fracSlot = 0;
vcIndex = vcc->vci;
IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
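/*
 * Illustrative example (assumed numbers): with CbrTotEntries = 3072 and
 * entries = 100, spacing = 30 and sp_mod = 72; the loop below advances the
 * ideal slot by 30 each pass and carries the 72/100 remainder in
 * sp_mod2/fracSlot, so an extra slot is inserted whenever the accumulated
 * fraction reaches a whole entry.
 */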
while (toBeAssigned)
{
// If this is the first time, start the table loading for this connection
// as close to entryPoint as possible.
if (toBeAssigned == entries)
{
idealSlot = dev->CbrEntryPt;
dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping
if (dev->CbrEntryPt >= dev->CbrTotEntries)
dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
} else {
idealSlot += (u32)(spacing + fracSlot); // Point to the next location
// in the table that would be smoothest
fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
}
if (idealSlot >= (int)dev->CbrTotEntries)
idealSlot -= dev->CbrTotEntries;
// Continuously check around this ideal value until a null
// location is encountered.
SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
inc = 0;
testSlot = idealSlot;
TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value
IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
testSlot, TstSchedTbl,toBeAssigned);)
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
while (cbrVC) // If another VC at this location, we have to keep looking
{
inc++;
testSlot = idealSlot - inc;
if (testSlot < 0) { // Wrap if necessary
testSlot += dev->CbrTotEntries;
IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
SchedTbl,testSlot);)
}
TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
if (!cbrVC)
break;
testSlot = idealSlot + inc;
if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
testSlot -= dev->CbrTotEntries;
IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
testSlot, toBeAssigned);)
}
// set table index and read in value
TstSchedTbl = (u16*)(SchedTbl + testSlot);
IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
TstSchedTbl,cbrVC,inc);)
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
} /* while */
// Move this VCI number into this location of the CBR Sched table.
memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
dev->CbrRemEntries--;
toBeAssigned--;
} /* while */
/* IaFFrednCbrEnable */
dev->NumEnabledCBR++;
if (dev->NumEnabledCBR == 1) {
writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
IF_CBR(printk("CBR is enabled\n");)
}
return 0;
}
static void ia_cbrVc_close (struct atm_vcc *vcc) {
IADEV *iadev;
u16 *SchedTbl, NullVci = 0;
u32 i, NumFound;
iadev = INPH_IA_DEV(vcc->dev);
iadev->NumEnabledCBR--;
SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
if (iadev->NumEnabledCBR == 0) {
writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
IF_CBR (printk("CBR support disabled\n");)
}
NumFound = 0;
for (i=0; i < iadev->CbrTotEntries; i++)
{
if (*SchedTbl == vcc->vci) {
iadev->CbrRemEntries++;
*SchedTbl = NullVci;
IF_CBR(NumFound++;)
}
SchedTbl++;
}
IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
}
static int ia_avail_descs(IADEV *iadev) {
int tmp = 0;
ia_hack_tcq(iadev);
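/* TCQ entries are 16-bit descriptor numbers, so byte distances are halved;
   when the write pointer has wrapped, the two segments on either side of
   the ring boundary are summed. */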
if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
else
tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
iadev->ffL.tcq_st) / 2;
return tmp;
}
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
static int ia_que_tx (IADEV *iadev) {
struct sk_buff *skb;
int num_desc;
struct atm_vcc *vcc;
num_desc = ia_avail_descs(iadev);
while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
if (!(vcc = ATM_SKB(skb)->vcc)) {
dev_kfree_skb_any(skb);
printk("ia_que_tx: Null vcc\n");
break;
}
if (!test_bit(ATM_VF_READY,&vcc->flags)) {
dev_kfree_skb_any(skb);
printk("Free the SKB on closed vci %d \n", vcc->vci);
break;
}
if (ia_pkt_tx (vcc, skb)) {
skb_queue_head(&iadev->tx_backlog, skb);
}
num_desc--;
}
return 0;
}
static void ia_tx_poll (IADEV *iadev) {
struct atm_vcc *vcc = NULL;
struct sk_buff *skb = NULL, *skb1 = NULL;
struct ia_vcc *iavcc;
IARTN_Q * rtne;
ia_hack_tcq(iadev);
while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
skb = rtne->data.txskb;
if (!skb) {
printk("ia_tx_poll: skb is null\n");
goto out;
}
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
printk("ia_tx_poll: vcc is null\n");
dev_kfree_skb_any(skb);
goto out;
}
iavcc = INPH_IA_VCC(vcc);
if (!iavcc) {
printk("ia_tx_poll: iavcc is null\n");
dev_kfree_skb_any(skb);
goto out;
}
skb1 = skb_dequeue(&iavcc->txing_skb);
while (skb1 && (skb1 != skb)) {
if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
}
IF_ERR(printk("Release the SKB not match\n");)
if ((vcc->pop) && (skb1->len != 0))
{
vcc->pop(vcc, skb1);
IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
(long)skb1);)
}
else
dev_kfree_skb_any(skb1);
skb1 = skb_dequeue(&iavcc->txing_skb);
}
if (!skb1) {
IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
break;
}
if ((vcc->pop) && (skb->len != 0))
{
vcc->pop(vcc, skb);
IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
}
else
dev_kfree_skb_any(skb);
kfree(rtne);
}
ia_que_tx(iadev);
out:
return;
}
#if 0
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
u32 t;
int i;
/*
* Issue a command to enable writes to the NOVRAM
*/
NVRAM_CMD (EXTEND + EWEN);
NVRAM_CLR_CE;
/*
* issue the write command
*/
NVRAM_CMD(IAWRITE + addr);
/*
* Send the data, starting with D15, then D14, and so on for 16 bits
*/
for (i=15; i>=0; i--) {
NVRAM_CLKOUT (val & 0x8000);
val <<= 1;
}
NVRAM_CLR_CE;
CFG_OR(NVCE);
t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
while (!(t & NVDO))
t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
NVRAM_CLR_CE;
/*
* disable writes again
*/
NVRAM_CMD(EXTEND + EWDS);
NVRAM_CLR_CE;
CFG_AND(~NVDI);
}
#endif
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
u_short val;
u32 t;
int i;
/*
* Read the first bit that was clocked with the falling edge of the
* the last command data clock
*/
NVRAM_CMD(IAREAD + addr);
/*
* Now read the rest of the bits, the next bit read is D14, then D13,
* and so on.
*/
val = 0;
for (i=15; i>=0; i--) {
NVRAM_CLKIN(t);
val |= (t << i);
}
NVRAM_CLR_CE;
CFG_AND(~NVDI);
return val;
}
static void ia_hw_type(IADEV *iadev) {
u_short memType = ia_eeprom_get(iadev, 25);
iadev->memType = memType;
if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
iadev->num_tx_desc = IA_TX_BUF;
iadev->tx_buf_sz = IA_TX_BUF_SZ;
iadev->num_rx_desc = IA_RX_BUF;
iadev->rx_buf_sz = IA_RX_BUF_SZ;
} else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
if (IA_TX_BUF == DFL_TX_BUFFERS)
iadev->num_tx_desc = IA_TX_BUF / 2;
else
iadev->num_tx_desc = IA_TX_BUF;
iadev->tx_buf_sz = IA_TX_BUF_SZ;
if (IA_RX_BUF == DFL_RX_BUFFERS)
iadev->num_rx_desc = IA_RX_BUF / 2;
else
iadev->num_rx_desc = IA_RX_BUF;
iadev->rx_buf_sz = IA_RX_BUF_SZ;
}
else {
if (IA_TX_BUF == DFL_TX_BUFFERS)
iadev->num_tx_desc = IA_TX_BUF / 8;
else
iadev->num_tx_desc = IA_TX_BUF;
iadev->tx_buf_sz = IA_TX_BUF_SZ;
if (IA_RX_BUF == DFL_RX_BUFFERS)
iadev->num_rx_desc = IA_RX_BUF / 8;
else
iadev->num_rx_desc = IA_RX_BUF;
iadev->rx_buf_sz = IA_RX_BUF_SZ;
}
iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
iadev->rx_buf_sz, iadev->rx_pkt_ram);)
#if 0
if ((memType & FE_MASK) == FE_SINGLE_MODE)
iadev->phy_type = PHY_OC3C_S;
else if ((memType & FE_MASK) == FE_UTP_OPTION)
iadev->phy_type = PHY_UTP155;
else
iadev->phy_type = PHY_OC3C_M;
#endif
iadev->phy_type = memType & FE_MASK;
IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
memType,iadev->phy_type);)
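/*
 * Line rate in cells/sec: bits/sec -> bytes (/8) -> 53-byte cells (/53),
 * scaled by 26/27 (the same payload/overhead ratio as an OC-3 frame, 260 of
 * 270 columns carrying payload) -- presumably applied here as a rough
 * framing-overhead correction for every PHY type.
 */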
if (iadev->phy_type == FE_25MBIT_PHY)
iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
else if (iadev->phy_type == FE_DS3_PHY)
iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
else if (iadev->phy_type == FE_E3_PHY)
iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
else
iadev->LineRate = (u32)(ATM_OC3_PCR);
IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
}
static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
{
return readl(ia->phy + (reg >> 2));
}
static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
{
writel(val, ia->phy + (reg >> 2));
}
static void ia_frontend_intr(struct iadev_priv *iadev)
{
u32 status;
if (iadev->phy_type & FE_25MBIT_PHY) {
status = ia_phy_read32(iadev, MB25_INTR_STATUS);
iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
} else if (iadev->phy_type & FE_DS3_PHY) {
ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
} else if (iadev->phy_type & FE_E3_PHY) {
ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
} else {
status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
}
printk(KERN_INFO "IA: SUNI carrier %s\n",
iadev->carrier_detect ? "detected" : "lost signal");
}
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
iadev->carrier_detect =
(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
struct ia_reg {
u16 reg;
u16 val;
};
static void ia_phy_write(struct iadev_priv *iadev,
const struct ia_reg *regs, int len)
{
while (len--) {
ia_phy_write32(iadev, regs->reg, regs->val);
regs++;
}
}
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
static const struct ia_reg suni_ds3_init [] = {
{ SUNI_DS3_FRM_INTR_ENBL, 0x17 },
{ SUNI_DS3_FRM_CFG, 0x01 },
{ SUNI_DS3_TRAN_CFG, 0x01 },
{ SUNI_CONFIG, 0 },
{ SUNI_SPLR_CFG, 0 },
{ SUNI_SPLT_CFG, 0 }
};
u32 status;
status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
static const struct ia_reg suni_e3_init [] = {
{ SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
{ SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
{ SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
{ SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
{ SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
{ SUNI_CONFIG, SUNI_PM7345_E3ENBL },
{ SUNI_SPLR_CFG, 0x41 },
{ SUNI_SPLT_CFG, 0x41 }
};
u32 status;
status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
static const struct ia_reg suni_init [] = {
/* Enable RSOP loss of signal interrupt. */
{ SUNI_INTR_ENBL, 0x28 },
/* Clear error counters. */
{ SUNI_ID_RESET, 0 },
/* Clear "PMCTST" in master test register. */
{ SUNI_MASTER_TEST, 0 },
{ SUNI_RXCP_CTRL, 0x2c },
{ SUNI_RXCP_FCTRL, 0x81 },
{ SUNI_RXCP_IDLE_PAT_H1, 0 },
{ SUNI_RXCP_IDLE_PAT_H2, 0 },
{ SUNI_RXCP_IDLE_PAT_H3, 0 },
{ SUNI_RXCP_IDLE_PAT_H4, 0x01 },
{ SUNI_RXCP_IDLE_MASK_H1, 0xff },
{ SUNI_RXCP_IDLE_MASK_H2, 0xff },
{ SUNI_RXCP_IDLE_MASK_H3, 0xff },
{ SUNI_RXCP_IDLE_MASK_H4, 0xfe },
{ SUNI_RXCP_CELL_PAT_H1, 0 },
{ SUNI_RXCP_CELL_PAT_H2, 0 },
{ SUNI_RXCP_CELL_PAT_H3, 0 },
{ SUNI_RXCP_CELL_PAT_H4, 0x01 },
{ SUNI_RXCP_CELL_MASK_H1, 0xff },
{ SUNI_RXCP_CELL_MASK_H2, 0xff },
{ SUNI_RXCP_CELL_MASK_H3, 0xff },
{ SUNI_RXCP_CELL_MASK_H4, 0xff },
{ SUNI_TXCP_CTRL, 0xa4 },
{ SUNI_TXCP_INTR_EN_STS, 0x10 },
{ SUNI_TXCP_IDLE_PAT_H5, 0x55 }
};
if (iadev->phy_type & FE_DS3_PHY)
ia_suni_pm7345_init_ds3(iadev);
else
ia_suni_pm7345_init_e3(iadev);
ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
return;
}
/***************************** IA_LIB END *****************************/
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
static void xdump( u_char* cp, int length, char* prefix )
{
int col, count;
u_char prntBuf[120];
u_char* pBuf = prntBuf;
count = 0;
while(count < length){
pBuf += sprintf( pBuf, "%s", prefix );
for(col = 0;count + col < length && col < 16; col++){
if (col != 0 && (col % 4) == 0)
pBuf += sprintf( pBuf, " " );
pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
}
while(col++ < 16){ /* pad end of buffer with blanks */
if ((col % 4) == 0)
pBuf += sprintf( pBuf, " " );
pBuf += sprintf( pBuf, " " );
}
pBuf += sprintf( pBuf, " " );
for(col = 0;count + col < length && col < 16; col++){
if (isprint((int)cp[count + col]))
pBuf += sprintf( pBuf, "%c", cp[count + col] );
else
pBuf += sprintf( pBuf, "." );
}
printk("%s\n", prntBuf);
count += col;
pBuf = prntBuf;
}
} /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
static struct atm_dev *ia_boards = NULL;
#define ACTUAL_RAM_BASE \
RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
/*-- some utilities and memory allocation stuff will come here -------------*/
static void desc_dbg(IADEV *iadev) {
u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
u32 i;
void __iomem *tmp;
// regval = readl((u32)ia_cmds->maddr);
tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR);
printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
readw(iadev->seg_ram+tcq_wr_ptr-2));
printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr,
iadev->ffL.tcq_rd);
tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR);
tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR);
printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
i = 0;
while (tcq_st_ptr != tcq_ed_ptr) {
tmp = iadev->seg_ram+tcq_st_ptr;
printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp);
tcq_st_ptr += 2;
}
for(i=0; i <iadev->num_tx_desc; i++)
printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
/*----------------------------- Receiving side stuff --------------------------*/
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
IADEV *iadev;
u_short state;
u_short excpq_rd_ptr;
//u_short *ptr;
int vci, error = 1;
iadev = INPH_IA_DEV(dev);
state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
{ printk("state = %x \n", state);
excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
// TODO: update exception stat
vci = readw(iadev->reass_ram+excpq_rd_ptr);
error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
// pwang_test
excpq_rd_ptr += 4;
if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
}
#endif
}
static void free_desc(struct atm_dev *dev, int desc)
{
IADEV *iadev;
iadev = INPH_IA_DEV(dev);
writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
iadev->rfL.fdq_wr +=2;
if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
iadev->rfL.fdq_wr = iadev->rfL.fdq_st;
writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}
static int rx_pkt(struct atm_dev *dev)
{
IADEV *iadev;
struct atm_vcc *vcc;
unsigned short status;
struct rx_buf_desc __iomem *buf_desc_ptr;
int desc;
struct dle* wr_ptr;
int len;
struct sk_buff *skb;
u_int buf_addr, dma_addr;
iadev = INPH_IA_DEV(dev);
if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
{
printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
return -EINVAL;
}
/* mask 1st 3 bits to get the actual descno. */
desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
iadev->reass_ram, iadev->rfL.pcq_rd, desc);
printk(" pcq_wr_ptr = 0x%x\n",
readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
/* update the read pointer - maybe we should do this in the end */
if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
else
iadev->rfL.pcq_rd += 2;
writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
/* get the buffer desc entry.
update stuff. - doesn't seem to be any update necessary
*/
buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;
}
vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
if (!vcc)
{
free_desc(dev, desc);
printk("IA: null vcc, drop PDU\n");
return -1;
}
/* might want to check the status bits for errors */
status = (u_short) (buf_desc_ptr->desc_mode);
if (status & (RX_CER | RX_PTE | RX_OFL))
{
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("IA: bad packet, dropping it");)
if (status & RX_CER) {
IF_ERR(printk(" cause: packet CRC error\n");)
}
else if (status & RX_PTE) {
IF_ERR(printk(" cause: packet time out\n");)
}
else {
IF_ERR(printk(" cause: buffer overflow\n");)
}
goto out_free_desc;
}
/*
build DLE.
*/
buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
len = dma_addr - buf_addr;
if (len > iadev->rx_buf_sz) {
printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
atomic_inc(&vcc->stats->rx_err);
goto out_free_desc;
}
if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
if (vcc->vci < 32)
printk("Drop control packets\n");
goto out_free_desc;
}
skb_put(skb,len);
// pwang_test
ATM_SKB(skb)->vcc = vcc;
ATM_DESC(skb) = desc;
skb_queue_tail(&iadev->rx_dma_q, skb);
/* Build the DLE structure */
wr_ptr = iadev->rx_dle_q.write;
wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
len, PCI_DMA_FROMDEVICE);
wr_ptr->local_pkt_addr = buf_addr;
wr_ptr->bytes = len; /* We don't know this do we ?? */
wr_ptr->mode = DMA_INT_ENABLE;
/* should take care of wrap-around here too. */
if(++wr_ptr == iadev->rx_dle_q.end)
wr_ptr = iadev->rx_dle_q.start;
iadev->rx_dle_q.write = wr_ptr;
udelay(1);
/* Increment transaction counter */
writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out: return 0;
out_free_desc:
free_desc(dev, desc);
goto out;
}
static void rx_intr(struct atm_dev *dev)
{
IADEV *iadev;
u_short status;
u_short state, i;
iadev = INPH_IA_DEV(dev);
status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
if (status & RX_PKT_RCVD)
{
/* do something */
/* Basically recvd an interrupt for receiving a packet.
A descriptor would have been written to the packet complete
queue. Get all the descriptors and set up dma to move the
packets till the packet complete queue is empty..
*/
state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
while(!(state & PCQ_EMPTY))
{
rx_pkt(dev);
state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
}
iadev->rxing = 1;
}
if (status & RX_FREEQ_EMPT)
{
if (iadev->rxing) {
iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
iadev->rx_tmp_jif = jiffies;
iadev->rxing = 0;
}
else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
for (i = 1; i <= iadev->num_rx_desc; i++)
free_desc(dev, i);
printk("Test logic RUN!!!!\n");
writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
iadev->rxing = 1;
}
IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
}
if (status & RX_EXCP_RCVD)
{
/* probably need to handle the exception queue also. */
IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
rx_excp_rcvd(dev);
}
if (status & RX_RAW_RCVD)
{
/* need to handle the raw incoming cells. This depends on
whether we have programmed to receive the raw cells or not.
Else ignore. */
IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
}
}
static void rx_dle_intr(struct atm_dev *dev)
{
IADEV *iadev;
struct atm_vcc *vcc;
struct sk_buff *skb;
int desc;
u_short state;
struct dle *dle, *cur_dle;
u_int dle_lp;
int len;
iadev = INPH_IA_DEV(dev);
/* free all the dles done, that is just update our own dle read pointer
- do we really need to do this. Think not. */
/* DMA is done, just get all the receive buffers from the rx dma queue
and push them up to the higher layer protocol. Also free the desc
associated with the buffer. */
dle = iadev->rx_dle_q.read;
dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
while(dle != cur_dle)
{
/* free the DMAed skb */
skb = skb_dequeue(&iadev->rx_dma_q);
if (!skb)
goto INCR_DLE;
desc = ATM_DESC(skb);
free_desc(dev, desc);
if (!(len = skb->len))
{
printk("rx_dle_intr: skb len 0\n");
dev_kfree_skb_any(skb);
}
else
{
struct cpcs_trailer *trailer;
u_short length;
struct ia_vcc *ia_vcc;
pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
len, PCI_DMA_FROMDEVICE);
/* no VCC related housekeeping done as yet. lets see */
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
printk("IA: null vcc\n");
dev_kfree_skb_any(skb);
goto INCR_DLE;
}
ia_vcc = INPH_IA_VCC(vcc);
if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
goto INCR_DLE;
}
// get real pkt length pwang_test
trailer = (struct cpcs_trailer*)((u_char *)skb->data +
skb->len - sizeof(*trailer));
length = swap_byte_order(trailer->length);
if ((length > iadev->rx_buf_sz) || (length >
(skb->len - sizeof(struct cpcs_trailer))))
{
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
goto INCR_DLE;
}
skb_trim(skb, length);
/* Display the packet */
IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
xdump(skb->data, skb->len, "RX: ");
printk("\n");)
IF_RX(printk("rx_dle_intr: skb push");)
vcc->push(vcc,skb);
atomic_inc(&vcc->stats->rx);
iadev->rx_pkt_cnt++;
}
INCR_DLE:
if (++dle == iadev->rx_dle_q.end)
dle = iadev->rx_dle_q.start;
}
iadev->rx_dle_q.read = dle;
/* if the interrupts are masked because there were no free desc available,
unmask them now. */
if (!iadev->rxing) {
state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
if (!(state & FREEQ_EMPTY)) {
state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
iadev->reass_reg+REASS_MASK_REG);
iadev->rxing++;
}
}
}
static int open_rx(struct atm_vcc *vcc)
{
IADEV *iadev;
u_short __iomem *vc_table;
u_short __iomem *reass_ptr;
IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
iadev = INPH_IA_DEV(vcc->dev);
if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
if (iadev->phy_type & FE_25MBIT_PHY) {
printk("IA: ABR not support\n");
return -EINVAL;
}
}
/* Make only this VCI in the vc table valid and let all
others be invalid entries */
vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
vc_table += vcc->vci;
/* mask the last 6 bits and OR it with 3 for 1K VCs */
*vc_table = vcc->vci << 6;
/* Also keep a list of open rx vcs so that we can attach them with
incoming PDUs later. */
if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
(vcc->qos.txtp.traffic_class == ATM_ABR))
{
srv_cls_param_t srv_p;
init_abr_vc(iadev, &srv_p);
ia_open_abr_vc(iadev, &srv_p, vcc, 0);
}
else { /* for UBR later may need to add CBR logic */
reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
reass_ptr += vcc->vci;
*reass_ptr = NO_AAL5_PKT;
}
if (iadev->rx_open[vcc->vci])
printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
vcc->dev->number, vcc->vci);
iadev->rx_open[vcc->vci] = vcc;
return 0;
}
static int rx_init(struct atm_dev *dev)
{
IADEV *iadev;
struct rx_buf_desc __iomem *buf_desc_ptr;
unsigned long rx_pkt_start = 0;
void *dle_addr;
struct abr_vc_table *abr_vc_table;
u16 *vc_table;
u16 *reass_table;
int i,j, vcsize_sel;
u_short freeq_st_adr;
u_short *freeq_start;
iadev = INPH_IA_DEV(dev);
// spin_lock_init(&iadev->rx_lock);
/* Allocate 4k bytes - more aligned than needed (4k boundary) */
dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
&iadev->rx_dle_dma);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
}
iadev->rx_dle_q.start = (struct dle *)dle_addr;
iadev->rx_dle_q.read = iadev->rx_dle_q.start;
iadev->rx_dle_q.write = iadev->rx_dle_q.start;
iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
/* the end of the dle q points to the entry after the last
DLE that can be used. */
/* write the upper 20 bits of the start address to rx list address register */
/* We know this is 32bit bus addressed so the following is safe */
writel(iadev->rx_dle_dma & 0xfffff000,
iadev->dma + IPHASE5575_RX_LIST_ADDR);
IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_TX_LIST_ADDR,
readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_RX_LIST_ADDR,
readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
writew(0, iadev->reass_reg+MODE_REG);
writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
/* Receive side control memory map
-------------------------------
Buffer descr 0x0000 (736 - 23K)
VP Table 0x5c00 (256 - 512)
Except q 0x5e00 (128 - 512)
Free buffer q 0x6000 (1K - 2K)
Packet comp q 0x6800 (1K - 2K)
Reass Table 0x7000 (1K - 2K)
VC Table 0x7800 (1K - 2K)
ABR VC Table 0x8000 (1K - 32K)
*/
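/*
 * Note: each base constant used below is multiplied by iadev->memSize
 * (1 for the 512KB/1K-VC boards, 4 for the 1MB/4K-VC boards), so the
 * layout above scales with the amount of on-board control RAM.
 */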
/* Base address for Buffer Descriptor Table */
writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
/* Set the buffer size register */
writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
/* Initialize each entry in the Buffer Descriptor Table */
iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
buf_desc_ptr++;
rx_pkt_start = iadev->rx_pkt_ram;
for(i=1; i<=iadev->num_rx_desc; i++)
{
memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
buf_desc_ptr++;
rx_pkt_start += iadev->rx_buf_sz;
}
IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
i = FREE_BUF_DESC_Q*iadev->memSize;
writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE);
writew(i, iadev->reass_reg+FREEQ_ST_ADR);
writew(i+iadev->num_rx_desc*sizeof(u_short),
iadev->reass_reg+FREEQ_ED_ADR);
writew(i, iadev->reass_reg+FREEQ_RD_PTR);
writew(i+iadev->num_rx_desc*sizeof(u_short),
iadev->reass_reg+FREEQ_WR_PTR);
/* Fill the FREEQ with all the free descriptors. */
freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
for(i=1; i<=iadev->num_rx_desc; i++)
{
*freeq_start = (u_short)i;
freeq_start++;
}
IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
/* Packet Complete Queue */
i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
writew(i, iadev->reass_reg+PCQ_ST_ADR);
writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
writew(i, iadev->reass_reg+PCQ_RD_PTR);
writew(i, iadev->reass_reg+PCQ_WR_PTR);
/* Exception Queue */
i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
iadev->reass_reg+EXCP_Q_ED_ADR);
writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
/* Load local copy of FREEQ and PCQ ptrs */
iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
iadev->rfL.pcq_wr);)
/* just for check - no VP TBL */
/* VP Table */
/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
/* initialize VP Table for invalid VPIs
- I guess we can write all 1s or 0x000f in the entire memory
space or something similar.
*/
/* This seems to work and looks right to me too !!! */
i = REASS_TABLE * iadev->memSize;
writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
/* initialize Reassembly table to I don't know what ???? */
reass_table = (u16 *)(iadev->reass_ram+i);
j = REASS_TABLE_SZ * iadev->memSize;
for(i=0; i < j; i++)
*reass_table++ = NO_AAL5_PKT;
i = 8*1024;
vcsize_sel = 0;
while (i != iadev->num_vc) {
i /= 2;
vcsize_sel++;
}
i = RX_VC_TABLE * iadev->memSize;
writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
j = RX_VC_TABLE_SZ * iadev->memSize;
for(i = 0; i < j; i++)
{
/* shift the reassembly pointer by 3 + lower 3 bits of
vc_lkup_base register (=3 for 1K VCs) and the last byte
is those low 3 bits.
Shall program this later.
*/
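/* Entries are marked invalid here (low bits set); open_rx() overwrites a
   slot with (vci << 6) when that VCI is opened for receive. */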
*vc_table = (i << 6) | 15; /* for invalid VCI */
vc_table++;
}
/* ABR VC table */
i = ABR_VC_TABLE * iadev->memSize;
writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
i = ABR_VC_TABLE * iadev->memSize;
abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
j = REASS_TABLE_SZ * iadev->memSize;
memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
for(i = 0; i < j; i++) {
abr_vc_table->rdf = 0x0003;
abr_vc_table->air = 0x5eb1;
abr_vc_table++;
}
/* Initialize other registers */
/* VP Filter Register set for VC Reassembly only */
writew(0xff00, iadev->reass_reg+VP_FILTER);
writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
writew(0x1, iadev->reass_reg+PROTOCOL_ID);
/* Packet Timeout Count related Registers :
Set packet timeout to occur in about 3 seconds
Set Packet Aging Interval count register to overflow in about 4 us
*/
writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
i = (j >> 6) & 0xFF;
j += 2 * (j - 1);
i |= ((j << 2) & 0xFF00);
writew(i, iadev->reass_reg+TMOUT_RANGE);
/* initiate the desc_tble */
for(i=0; i<iadev->num_tx_desc;i++)
iadev->desc_tbl[i].timestamp = 0;
/* to clear the interrupt status register - read it */
readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
/* Mask Register - clear it */
writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
skb_queue_head_init(&iadev->rx_dma_q);
iadev->rx_free_desc_qhead = NULL;
iadev->rx_open = kcalloc(iadev->num_vc, sizeof(struct atm_vcc *), GFP_KERNEL);
if (!iadev->rx_open) {
printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
dev->number);
goto err_free_dle;
}
iadev->rxing = 1;
iadev->rx_pkt_cnt = 0;
/* Mode Register */
writew(R_ONLINE, iadev->reass_reg+MODE_REG);
return 0;
err_free_dle:
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
err_out:
return -ENOMEM;
}
/*
The memory map suggested in appendix A and the coding for it.
Keeping it around just in case we change our mind later.
Buffer descr 0x0000 (128 - 4K)
UBR sched 0x1000 (1K - 4K)
UBR Wait q 0x2000 (1K - 4K)
Commn queues 0x3000 Packet Ready, Transmit comp(0x3100)
(128 - 256) each
extended VC 0x4000 (1K - 8K)
ABR sched 0x6000 and ABR wait queue (1K - 2K) each
CBR sched 0x7000 (as needed)
VC table 0x8000 (1K - 32K)
*/
static void tx_intr(struct atm_dev *dev)
{
IADEV *iadev;
unsigned short status;
unsigned long flags;
iadev = INPH_IA_DEV(dev);
status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
if (status & TRANSMIT_DONE){
IF_EVENT(printk("Tansmit Done Intr logic run\n");)
spin_lock_irqsave(&iadev->tx_lock, flags);
ia_tx_poll(iadev);
spin_unlock_irqrestore(&iadev->tx_lock, flags);
writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
if (iadev->close_pending)
wake_up(&iadev->close_wait);
}
if (status & TCQ_NOT_EMPTY)
{
IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
}
}
static void tx_dle_intr(struct atm_dev *dev)
{
IADEV *iadev;
struct dle *dle, *cur_dle;
struct sk_buff *skb;
struct atm_vcc *vcc;
struct ia_vcc *iavcc;
u_int dle_lp;
unsigned long flags;
iadev = INPH_IA_DEV(dev);
spin_lock_irqsave(&iadev->tx_lock, flags);
dle = iadev->tx_dle_q.read;
dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
(sizeof(struct dle)*DLE_ENTRIES - 1);
cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
while (dle != cur_dle)
{
/* free the DMAed skb */
skb = skb_dequeue(&iadev->tx_dma_q);
if (!skb) break;
/* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
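/* Presumably only the skb's own DLE carries a per-packet PCI mapping (the
   CPCS trailer buffers are mapped once in tx_init), so unmap every other
   DLE only. */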
if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
PCI_DMA_TODEVICE);
}
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
printk("tx_dle_intr: vcc is null\n");
spin_unlock_irqrestore(&iadev->tx_lock, flags);
dev_kfree_skb_any(skb);
return;
}
iavcc = INPH_IA_VCC(vcc);
if (!iavcc) {
printk("tx_dle_intr: iavcc is null\n");
spin_unlock_irqrestore(&iadev->tx_lock, flags);
dev_kfree_skb_any(skb);
return;
}
if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
if ((vcc->pop) && (skb->len != 0))
{
vcc->pop(vcc, skb);
}
else {
dev_kfree_skb_any(skb);
}
}
else { /* Hold the rate-limited skb for flow control */
IA_SKB_STATE(skb) |= IA_DLED;
skb_queue_tail(&iavcc->txing_skb, skb);
}
IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
if (++dle == iadev->tx_dle_q.end)
dle = iadev->tx_dle_q.start;
}
iadev->tx_dle_q.read = dle;
spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
static int open_tx(struct atm_vcc *vcc)
{
struct ia_vcc *ia_vcc;
IADEV *iadev;
struct main_vc *vc;
struct ext_vc *evc;
int ret;
IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
iadev = INPH_IA_DEV(vcc->dev);
if (iadev->phy_type & FE_25MBIT_PHY) {
if (vcc->qos.txtp.traffic_class == ATM_ABR) {
printk("IA: ABR not support\n");
return -EINVAL;
}
if (vcc->qos.txtp.traffic_class == ATM_CBR) {
printk("IA: CBR not support\n");
return -EINVAL;
}
}
ia_vcc = INPH_IA_VCC(vcc);
memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
if (vcc->qos.txtp.max_sdu >
(iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
printk("IA: SDU size over (%d) the configured SDU size %d\n",
vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
vcc->dev_data = NULL;
kfree(ia_vcc);
return -EINVAL;
}
ia_vcc->vc_desc_cnt = 0;
ia_vcc->txing = 1;
/* find pcr */
if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
vcc->qos.txtp.pcr = iadev->LineRate;
else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
vcc->qos.txtp.pcr = iadev->LineRate;
else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
if (vcc->qos.txtp.pcr > iadev->LineRate)
vcc->qos.txtp.pcr = iadev->LineRate;
ia_vcc->pcr = vcc->qos.txtp.pcr;
if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr;
if (ia_vcc->pcr < iadev->rate_limit)
skb_queue_head_init (&ia_vcc->txing_skb);
if (ia_vcc->pcr < iadev->rate_limit) {
struct sock *sk = sk_atm(vcc);
if (vcc->qos.txtp.max_sdu != 0) {
if (ia_vcc->pcr > 60000)
sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
else if (ia_vcc->pcr > 2000)
sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
else
sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
}
else
sk->sk_sndbuf = 24576;
}
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
vc += vcc->vci;
evc += vcc->vci;
memset((caddr_t)vc, 0, sizeof(*vc));
memset((caddr_t)evc, 0, sizeof(*evc));
/* store the most significant 4 bits of vci as the last 4 bits
of first part of atm header.
store the last 12 bits of vci as first 12 bits of the second
part of the atm header.
*/
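/* e.g. (illustrative) vci 0x1234: atm_hdr1 = 0x1, atm_hdr2 = 0x2340 */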
evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
/* check the following for different traffic classes */
if (vcc->qos.txtp.traffic_class == ATM_UBR)
{
vc->type = UBR;
vc->status = CRC_APPEND;
vc->acr = cellrate_to_float(iadev->LineRate);
if (vcc->qos.txtp.pcr > 0)
vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
vcc->qos.txtp.max_pcr,vc->acr);)
}
else if (vcc->qos.txtp.traffic_class == ATM_ABR)
{ srv_cls_param_t srv_p;
IF_ABR(printk("Tx ABR VCC\n");)
init_abr_vc(iadev, &srv_p);
if (vcc->qos.txtp.pcr > 0)
srv_p.pcr = vcc->qos.txtp.pcr;
if (vcc->qos.txtp.min_pcr > 0) {
int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
if (tmpsum > iadev->LineRate)
return -EBUSY;
srv_p.mcr = vcc->qos.txtp.min_pcr;
iadev->sum_mcr += vcc->qos.txtp.min_pcr;
}
else srv_p.mcr = 0;
if (vcc->qos.txtp.icr)
srv_p.icr = vcc->qos.txtp.icr;
if (vcc->qos.txtp.tbe)
srv_p.tbe = vcc->qos.txtp.tbe;
if (vcc->qos.txtp.frtt)
srv_p.frtt = vcc->qos.txtp.frtt;
if (vcc->qos.txtp.rif)
srv_p.rif = vcc->qos.txtp.rif;
if (vcc->qos.txtp.rdf)
srv_p.rdf = vcc->qos.txtp.rdf;
if (vcc->qos.txtp.nrm_pres)
srv_p.nrm = vcc->qos.txtp.nrm;
if (vcc->qos.txtp.trm_pres)
srv_p.trm = vcc->qos.txtp.trm;
if (vcc->qos.txtp.adtf_pres)
srv_p.adtf = vcc->qos.txtp.adtf;
if (vcc->qos.txtp.cdf_pres)
srv_p.cdf = vcc->qos.txtp.cdf;
if (srv_p.icr > srv_p.pcr)
srv_p.icr = srv_p.pcr;
IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
srv_p.pcr, srv_p.mcr);)
ia_open_abr_vc(iadev, &srv_p, vcc, 1);
} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
if (iadev->phy_type & FE_25MBIT_PHY) {
printk("IA: CBR not support\n");
return -EINVAL;
}
if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
IF_CBR(printk("PCR is not available\n");)
return -1;
}
vc->type = CBR;
vc->status = CRC_APPEND;
if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
return ret;
}
}
else
printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
IF_EVENT(printk("ia open_tx returning \n");)
return 0;
}
static int tx_init(struct atm_dev *dev)
{
IADEV *iadev;
struct tx_buf_desc *buf_desc_ptr;
unsigned int tx_pkt_start;
void *dle_addr;
int i;
u_short tcq_st_adr;
u_short *tcq_start;
u_short prq_st_adr;
u_short *prq_start;
struct main_vc *vc;
struct ext_vc *evc;
u_short tmp16;
u32 vcsize_sel;
iadev = INPH_IA_DEV(dev);
spin_lock_init(&iadev->tx_lock);
IF_INIT(printk("Tx MASK REG: 0x%0x\n",
readw(iadev->seg_reg+SEG_MASK_REG));)
/* Allocate 4k (boundary aligned) bytes */
dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
&iadev->tx_dle_dma);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
}
iadev->tx_dle_q.start = (struct dle*)dle_addr;
iadev->tx_dle_q.read = iadev->tx_dle_q.start;
iadev->tx_dle_q.write = iadev->tx_dle_q.start;
iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
/* write the upper 20 bits of the start address to tx list address register */
writel(iadev->tx_dle_dma & 0xfffff000,
iadev->dma + IPHASE5575_TX_LIST_ADDR);
writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
writew(0, iadev->seg_reg+MODE_REG_0);
writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
/*
Transmit side control memory map
--------------------------------
Buffer descr 0x0000 (128 - 4K)
Commn queues 0x1000 Transmit comp, Packet ready(0x1400)
(512 - 1K) each
TCQ - 4K, PRQ - 5K
CBR Table 0x1800 (as needed) - 6K
UBR Table 0x3000 (1K - 4K) - 12K
UBR Wait queue 0x4000 (1K - 4K) - 16K
ABR sched 0x5000 and ABR wait queue (1K - 2K) each
ABR Tbl - 20K, ABR Wq - 22K
extended VC 0x6000 (1K - 8K) - 24K
VC Table 0x8000 (1K - 32K) - 32K
Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
and Wait q, which can be allotted later.
*/
/* Buffer Descriptor Table Base address */
writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
/* initialize each entry in the buffer descriptor table */
buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
buf_desc_ptr++;
tx_pkt_start = TX_PACKET_RAM;
for(i=1; i<=iadev->num_tx_desc; i++)
{
memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
buf_desc_ptr->desc_mode = AAL5;
buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
buf_desc_ptr++;
tx_pkt_start += iadev->tx_buf_sz;
}
iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
if (!iadev->tx_buf) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_dle;
}
for (i= 0; i< iadev->num_tx_desc; i++)
{
struct cpcs_trailer *cpcs;
cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
if(!cpcs) {
printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
goto err_free_tx_bufs;
}
iadev->tx_buf[i].cpcs = cpcs;
iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
}
iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
sizeof(struct desc_tbl_t), GFP_KERNEL);
if (!iadev->desc_tbl) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_all_tx_bufs;
}
/* Communication Queues base address */
i = TX_COMP_Q * iadev->memSize;
writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
/* Transmit Complete Queue */
writew(i, iadev->seg_reg+TCQ_ST_ADR);
writew(i, iadev->seg_reg+TCQ_RD_PTR);
writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
iadev->seg_reg+TCQ_ED_ADR);
/* Fill the TCQ with all the free descriptors. */
tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
for(i=1; i<=iadev->num_tx_desc; i++)
{
*tcq_start = (u_short)i;
tcq_start++;
}
/* Packet Ready Queue */
i = PKT_RDY_Q * iadev->memSize;
writew(i, iadev->seg_reg+PRQ_ST_ADR);
writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
iadev->seg_reg+PRQ_ED_ADR);
writew(i, iadev->seg_reg+PRQ_RD_PTR);
writew(i, iadev->seg_reg+PRQ_WR_PTR);
/* Load local copy of PRQ and TCQ ptrs */
iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
/* Just for safety initializing the queue to have desc 1 always */
/* Fill the PRQ with all the free descriptors. */
prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
for(i=1; i<=iadev->num_tx_desc; i++)
{
*prq_start = (u_short)0; /* desc 1 in all entries */
prq_start++;
}
/* CBR Table */
IF_INIT(printk("Start CBR Init\n");)
#if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */
writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif
IF_INIT(printk("value in register = 0x%x\n",
readw(iadev->seg_reg+CBR_PTR_BASE));)
tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
readw(iadev->seg_reg+CBR_TAB_BEG));)
writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
writew(tmp16, iadev->seg_reg+CBR_TAB_END);
IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
readw(iadev->seg_reg+CBR_TAB_END+1));)
/* Initialize the CBR Scheduling Table */
memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
0, iadev->num_vc*6);
iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
iadev->CbrEntryPt = 0;
iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
iadev->NumEnabledCBR = 0;
/* UBR scheduling Table and wait queue */
/* initialize all bytes of UBR scheduler table and wait queue to 0
- SCHEDSZ is 1K (# of entries).
- UBR Table size is 4K
- UBR wait queue is 4K
since the table and wait queues are contiguous, all the bytes
can be initialized by one memset.
*/
vcsize_sel = 0;
i = 8*1024;
while (i != iadev->num_vc) {
i /= 2;
vcsize_sel++;
}
i = MAIN_VC_TABLE * iadev->memSize;
writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
i = EXT_VC_TABLE * iadev->memSize;
writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
i = UBR_SCHED_TABLE * iadev->memSize;
writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE);
i = UBR_WAIT_Q * iadev->memSize;
writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE);
memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
0, iadev->num_vc*8);
/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
/* initialize all bytes of ABR scheduler table and wait queue to 0
- SCHEDSZ is 1K (# of entries).
- ABR Table size is 2K
- ABR wait queue is 2K
since the table and wait queues are contiguous, all the bytes
can be initialized by one memset.
*/
i = ABR_SCHED_TABLE * iadev->memSize;
writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
i = ABR_WAIT_Q * iadev->memSize;
writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
i = ABR_SCHED_TABLE*iadev->memSize;
memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
iadev->testTable = kmalloc(iadev->num_vc * sizeof(*iadev->testTable), GFP_KERNEL);
if (!iadev->testTable) {
printk("Get freepage failed\n");
goto err_free_desc_tbl;
}
for(i=0; i<iadev->num_vc; i++)
{
memset((caddr_t)vc, 0, sizeof(*vc));
memset((caddr_t)evc, 0, sizeof(*evc));
iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
GFP_KERNEL);
if (!iadev->testTable[i])
goto err_free_test_tables;
iadev->testTable[i]->lastTime = 0;
iadev->testTable[i]->fract = 0;
iadev->testTable[i]->vc_status = VC_UBR;
vc++;
evc++;
}
/* Other Initialization */
/* Max Rate Register */
if (iadev->phy_type & FE_25MBIT_PHY) {
writew(RATE25, iadev->seg_reg+MAXRATE);
writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
}
else {
writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
}
/* Set Idle Header Registers to be sure */
writew(0, iadev->seg_reg+IDLEHEADHI);
writew(0, iadev->seg_reg+IDLEHEADLO);
/* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */
writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
iadev->close_pending = 0;
init_waitqueue_head(&iadev->close_wait);
init_waitqueue_head(&iadev->timeout_wait);
skb_queue_head_init(&iadev->tx_dma_q);
ia_init_rtn_q(&iadev->tx_return_q);
/* RM Cell Protocol ID and Message Type */
writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
skb_queue_head_init (&iadev->tx_backlog);
/* Mode Register 1 */
writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
/* Mode Register 0 */
writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
/* Interrupt Status Register - read to clear */
readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
iadev->tx_pkt_cnt = 0;
iadev->rate_limit = iadev->LineRate / 3;
return 0;
err_free_test_tables:
while (--i >= 0)
kfree(iadev->testTable[i]);
kfree(iadev->testTable);
err_free_desc_tbl:
kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
i = iadev->num_tx_desc;
err_free_tx_bufs:
while (--i >= 0) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
pci_unmap_single(iadev->pci, desc->dma_addr,
sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
err_free_dle:
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
err_out:
return -ENOMEM;
}
static irqreturn_t ia_int(int irq, void *dev_id)
{
struct atm_dev *dev;
IADEV *iadev;
unsigned int status;
int handled = 0;
dev = dev_id;
iadev = INPH_IA_DEV(dev);
while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
{
handled = 1;
IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
if (status & STAT_REASSINT)
{
/* do something */
IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
rx_intr(dev);
}
if (status & STAT_DLERINT)
{
/* Clear this bit by writing a 1 to it. */
writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
rx_dle_intr(dev);
}
if (status & STAT_SEGINT)
{
/* do something */
IF_EVENT(printk("IA: tx_intr \n");)
tx_intr(dev);
}
if (status & STAT_DLETINT)
{
writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
tx_dle_intr(dev);
}
if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
{
if (status & STAT_FEINT)
ia_frontend_intr(iadev);
}
}
return IRQ_RETVAL(handled);
}
/*----------------------------- entries --------------------------------*/
static int get_esi(struct atm_dev *dev)
{
IADEV *iadev;
int i;
u32 mac1;
u16 mac2;
iadev = INPH_IA_DEV(dev);
mac1 = cpu_to_be32(le32_to_cpu(readl(
iadev->reg+IPHASE5575_MAC1)));
mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
for (i=0; i<MAC1_LEN; i++)
dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
for (i=0; i<MAC2_LEN; i++)
dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
return 0;
}
static int reset_sar(struct atm_dev *dev)
{
IADEV *iadev;
int i, error = 1;
unsigned int pci[64];
iadev = INPH_IA_DEV(dev);
for(i=0; i<64; i++)
if ((error = pci_read_config_dword(iadev->pci,
i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
writel(0, iadev->reg+IPHASE5575_EXT_RESET);
for(i=0; i<64; i++)
if ((error = pci_write_config_dword(iadev->pci,
i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
return error;
udelay(5);
return 0;
}
static int __devinit ia_init(struct atm_dev *dev)
{
IADEV *iadev;
unsigned long real_base;
void __iomem *base;
unsigned short command;
int error, i;
/* The device has been identified and registered. Now we read
necessary configuration info like memory base address,
interrupt number etc */
IF_INIT(printk(">ia_init\n");)
dev->ci_range.vpi_bits = 0;
dev->ci_range.vci_bits = NR_VCI_LD;
iadev = INPH_IA_DEV(dev);
real_base = pci_resource_start (iadev->pci, 0);
iadev->irq = iadev->pci->irq;
error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
if (error) {
printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
dev->number,error);
return -EINVAL;
}
IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
dev->number, iadev->pci->revision, real_base, iadev->irq);)
/* find mapping size of board */
iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
if (iadev->pci_map_size == 0x100000){
iadev->num_vc = 4096;
dev->ci_range.vci_bits = NR_VCI_4K_LD;
iadev->memSize = 4;
}
else if (iadev->pci_map_size == 0x40000) {
iadev->num_vc = 1024;
iadev->memSize = 1;
}
else {
printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
return -EINVAL;
}
IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
/* enable bus mastering */
pci_set_master(iadev->pci);
/*
* Delay at least 1us before doing any mem accesses (how 'bout 10?)
*/
udelay(10);
/* mapping the physical address to a virtual address in address space */
base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? */
if (!base)
{
printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
dev->number);
return -ENOMEM;
}
IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
dev->number, iadev->pci->revision, base, iadev->irq);)
/* filling the iphase dev structure */
iadev->mem = iadev->pci_map_size /2;
iadev->real_base = real_base;
iadev->base = base;
/* Bus Interface Control Registers */
iadev->reg = base + REG_BASE;
/* Segmentation Control Registers */
iadev->seg_reg = base + SEG_BASE;
/* Reassembly Control Registers */
iadev->reass_reg = base + REASS_BASE;
/* Front end/ DMA control registers */
iadev->phy = base + PHY_BASE;
iadev->dma = base + PHY_BASE;
/* RAM - Segmentation RAm and Reassembly RAM */
iadev->ram = base + ACTUAL_RAM_BASE;
iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
/* lets print out the above */
IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
iadev->reg,iadev->seg_reg,iadev->reass_reg,
iadev->phy, iadev->ram, iadev->seg_ram,
iadev->reass_ram);)
/* lets try reading the MAC address */
error = get_esi(dev);
if (error) {
iounmap(iadev->base);
return error;
}
printk("IA: ");
for (i=0; i < ESI_LEN; i++)
printk("%s%02X",i ? "-" : "",dev->esi[i]);
printk("\n");
/* reset SAR */
if (reset_sar(dev)) {
iounmap(iadev->base);
printk("IA: reset SAR fail, please try again\n");
return 1;
}
return 0;
}
static void ia_update_stats(IADEV *iadev) {
if (!iadev->carrier_detect)
return;
iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
return;
}
static void ia_led_timer(unsigned long arg) {
unsigned long flags;
static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
u_char i;
static u32 ctrl_reg;
for (i = 0; i < iadev_count; i++) {
if (ia_dev[i]) {
ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
if (blinking[i] == 0) {
blinking[i]++;
ctrl_reg &= (~CTRL_LED);
writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
ia_update_stats(ia_dev[i]);
}
else {
blinking[i] = 0;
ctrl_reg |= CTRL_LED;
writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
if (ia_dev[i]->close_pending)
wake_up(&ia_dev[i]->close_wait);
ia_tx_poll(ia_dev[i]);
spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
}
}
}
mod_timer(&ia_timer, jiffies + HZ / 4);
return;
}
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr)
{
writel(value, INPH_IA_DEV(dev)->phy+addr);
}
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
{
return readl(INPH_IA_DEV(dev)->phy+addr);
}
static void ia_free_tx(IADEV *iadev)
{
int i;
kfree(iadev->desc_tbl);
for (i = 0; i < iadev->num_vc; i++)
kfree(iadev->testTable[i]);
kfree(iadev->testTable);
for (i = 0; i < iadev->num_tx_desc; i++) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
pci_unmap_single(iadev->pci, desc->dma_addr,
sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
}
static void ia_free_rx(IADEV *iadev)
{
kfree(iadev->rx_open);
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
}
static int __devinit ia_start(struct atm_dev *dev)
{
IADEV *iadev;
int error;
unsigned char phy;
u32 ctrl_reg;
IF_EVENT(printk(">ia_start\n");)
iadev = INPH_IA_DEV(dev);
if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
dev->number, iadev->irq);
error = -EAGAIN;
goto err_out;
}
/* @@@ should release IRQ on error */
/* enabling memory + master */
if ((error = pci_write_config_word(iadev->pci,
PCI_COMMAND,
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
{
printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
"master (0x%x)\n",dev->number, error);
error = -EIO;
goto err_free_irq;
}
udelay(10);
/* Maybe we should reset the front end, initialize Bus Interface Control
Registers and see. */
IF_INIT(printk("Bus ctrl reg: %08x\n",
readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
| CTRL_B8
| CTRL_B16
| CTRL_B32
| CTRL_B48
| CTRL_B64
| CTRL_B128
| CTRL_ERRMASK
| CTRL_DLETMASK /* shud be removed l8r */
| CTRL_DLERMASK
| CTRL_SEGMASK
| CTRL_REASSMASK
| CTRL_FEMASK
| CTRL_CSPREEMPT;
writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
printk("Bus status reg after init: %08x\n",
readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
ia_hw_type(iadev);
error = tx_init(dev);
if (error)
goto err_free_irq;
error = rx_init(dev);
if (error)
goto err_free_tx;
ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
phy = 0; /* resolve compiler complaint */
IF_INIT (
if ((phy=ia_phy_get(dev,0)) == 0x30)
printk("IA: pm5346,rev.%d\n",phy&0x0f);
else
printk("IA: utopia,rev.%0x\n",phy);)
if (iadev->phy_type & FE_25MBIT_PHY)
ia_mb25_init(iadev);
else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
ia_suni_pm7345_init(iadev);
else {
error = suni_init(dev);
if (error)
goto err_free_rx;
if (dev->phy->start) {
error = dev->phy->start(dev);
if (error)
goto err_free_rx;
}
/* Get iadev->carrier_detect status */
ia_frontend_intr(iadev);
}
return 0;
err_free_rx:
ia_free_rx(iadev);
err_free_tx:
ia_free_tx(iadev);
err_free_irq:
free_irq(iadev->irq, dev);
err_out:
return error;
}
static void ia_close(struct atm_vcc *vcc)
{
DEFINE_WAIT(wait);
u16 *vc_table;
IADEV *iadev;
struct ia_vcc *ia_vcc;
struct sk_buff *skb = NULL;
struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
unsigned long closetime, flags;
iadev = INPH_IA_DEV(vcc->dev);
ia_vcc = INPH_IA_VCC(vcc);
if (!ia_vcc) return;
IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n",
ia_vcc->vc_desc_cnt,vcc->vci);)
clear_bit(ATM_VF_READY,&vcc->flags);
skb_queue_head_init (&tmp_tx_backlog);
skb_queue_head_init (&tmp_vcc_backlog);
if (vcc->qos.txtp.traffic_class != ATM_NONE) {
iadev->close_pending++;
prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
schedule_timeout(50);
finish_wait(&iadev->timeout_wait, &wait);
spin_lock_irqsave(&iadev->tx_lock, flags);
while((skb = skb_dequeue(&iadev->tx_backlog))) {
if (ATM_SKB(skb)->vcc == vcc){
if (vcc->pop) vcc->pop(vcc, skb);
else dev_kfree_skb_any(skb);
}
else
skb_queue_tail(&tmp_tx_backlog, skb);
}
while((skb = skb_dequeue(&tmp_tx_backlog)))
skb_queue_tail(&iadev->tx_backlog, skb);
IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
closetime = 300000 / ia_vcc->pcr;
if (closetime == 0)
closetime = 1;
spin_unlock_irqrestore(&iadev->tx_lock, flags);
wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
spin_lock_irqsave(&iadev->tx_lock, flags);
iadev->close_pending--;
iadev->testTable[vcc->vci]->lastTime = 0;
iadev->testTable[vcc->vci]->fract = 0;
iadev->testTable[vcc->vci]->vc_status = VC_UBR;
if (vcc->qos.txtp.traffic_class == ATM_ABR) {
if (vcc->qos.txtp.min_pcr > 0)
iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
}
if (vcc->qos.txtp.traffic_class == ATM_CBR) {
ia_vcc = INPH_IA_VCC(vcc);
iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
ia_cbrVc_close (vcc);
}
spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
// reset reass table
vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
vc_table += vcc->vci;
*vc_table = NO_AAL5_PKT;
// reset vc table
vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
vc_table += vcc->vci;
*vc_table = (vcc->vci << 6) | 15;
if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
struct abr_vc_table __iomem *abr_vc_table =
(iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
abr_vc_table += vcc->vci;
abr_vc_table->rdf = 0x0003;
abr_vc_table->air = 0x5eb1;
}
// Drain the packets
rx_dle_intr(vcc->dev);
iadev->rx_open[vcc->vci] = NULL;
}
kfree(INPH_IA_VCC(vcc));
ia_vcc = NULL;
vcc->dev_data = NULL;
clear_bit(ATM_VF_ADDR,&vcc->flags);
return;
}
static int ia_open(struct atm_vcc *vcc)
{
struct ia_vcc *ia_vcc;
int error;
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
{
IF_EVENT(printk("ia: not partially allocated resources\n");)
vcc->dev_data = NULL;
}
if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
{
IF_EVENT(printk("iphase open: unspec part\n");)
set_bit(ATM_VF_ADDR,&vcc->flags);
}
if (vcc->qos.aal != ATM_AAL5)
return -EINVAL;
IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
vcc->dev->number, vcc->vpi, vcc->vci);)
/* Device dependent initialization */
ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
if (!ia_vcc) return -ENOMEM;
vcc->dev_data = ia_vcc;
if ((error = open_rx(vcc)))
{
IF_EVENT(printk("iadev: error in open_rx, closing\n");)
ia_close(vcc);
return error;
}
if ((error = open_tx(vcc)))
{
IF_EVENT(printk("iadev: error in open_tx, closing\n");)
ia_close(vcc);
return error;
}
set_bit(ATM_VF_READY,&vcc->flags);
#if 0
{
static u8 first = 1;
if (first) {
ia_timer.expires = jiffies + 3*HZ;
add_timer(&ia_timer);
first = 0;
}
}
#endif
IF_EVENT(printk("ia open returning\n");)
return 0;
}
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
IF_EVENT(printk(">ia_change_qos\n");)
return 0;
}
static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
IA_CMDBUF ia_cmds;
IADEV *iadev;
int i, board;
u16 __user *tmps;
IF_EVENT(printk(">ia_ioctl\n");)
if (cmd != IA_CMD) {
if (!dev->phy->ioctl) return -EINVAL;
return dev->phy->ioctl(dev,cmd,arg);
}
if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
board = ia_cmds.status;
if ((board < 0) || (board >= iadev_count))
board = 0;
iadev = ia_dev[board];
switch (ia_cmds.cmd) {
case MEMDUMP:
{
switch (ia_cmds.sub_cmd) {
case MEMDUMP_DEV:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
return -EFAULT;
ia_cmds.status = 0;
break;
case MEMDUMP_SEGREG:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
tmps = (u16 __user *)ia_cmds.buf;
for(i=0; i<0x80; i+=2, tmps++)
if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
ia_cmds.status = 0;
ia_cmds.len = 0x80;
break;
case MEMDUMP_REASSREG:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
tmps = (u16 __user *)ia_cmds.buf;
for(i=0; i<0x80; i+=2, tmps++)
if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
ia_cmds.status = 0;
ia_cmds.len = 0x80;
break;
case MEMDUMP_FFL:
{
ia_regs_t *regs_local;
ffredn_t *ffL;
rfredn_t *rfL;
if (!capable(CAP_NET_ADMIN)) return -EPERM;
regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
if (!regs_local) return -ENOMEM;
ffL = ®s_local->ffredn;
rfL = ®s_local->rfredn;
/* Copy real rfred registers into the local copy */
for (i=0; i<(sizeof (rfredn_t))/4; i++)
((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
/* Copy real ffred registers into the local copy */
for (i=0; i<(sizeof (ffredn_t))/4; i++)
((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
kfree(regs_local);
return -EFAULT;
}
kfree(regs_local);
printk("Board %d registers dumped\n", board);
ia_cmds.status = 0;
}
break;
case READ_REG:
{
if (!capable(CAP_NET_ADMIN)) return -EPERM;
desc_dbg(iadev);
ia_cmds.status = 0;
}
break;
case 0x6:
{
ia_cmds.status = 0;
printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
}
break;
case 0x8:
{
struct k_sonet_stats *stats;
stats = &PRIV(_ia_dev[board])->sonet_stats;
printk("section_bip: %d\n", atomic_read(&stats->section_bip));
printk("line_bip : %d\n", atomic_read(&stats->line_bip));
printk("path_bip : %d\n", atomic_read(&stats->path_bip));
printk("line_febe : %d\n", atomic_read(&stats->line_febe));
printk("path_febe : %d\n", atomic_read(&stats->path_febe));
printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
}
ia_cmds.status = 0;
break;
case 0x9:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
for (i = 1; i <= iadev->num_rx_desc; i++)
free_desc(_ia_dev[board], i);
writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
iadev->reass_reg+REASS_MASK_REG);
iadev->rxing = 1;
ia_cmds.status = 0;
break;
case 0xb:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
ia_frontend_intr(iadev);
break;
case 0xa:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
{
ia_cmds.status = 0;
IADebugFlag = ia_cmds.maddr;
printk("New debug option loaded\n");
}
break;
default:
ia_cmds.status = 0;
break;
}
}
break;
default:
break;
}
return 0;
}
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
void __user *optval, int optlen)
{
IF_EVENT(printk(">ia_getsockopt\n");)
return -EINVAL;
}
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
void __user *optval, unsigned int optlen)
{
IF_EVENT(printk(">ia_setsockopt\n");)
return -EINVAL;
}
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
IADEV *iadev;
struct dle *wr_ptr;
struct tx_buf_desc __iomem *buf_desc_ptr;
int desc;
int comp_code;
int total_len;
struct cpcs_trailer *trailer;
struct ia_vcc *iavcc;
iadev = INPH_IA_DEV(vcc->dev);
iavcc = INPH_IA_VCC(vcc);
if (!iavcc->txing) {
printk("discard packet on closed VC\n");
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
return 0;
}
if (skb->len > iadev->tx_buf_sz - 8) {
printk("Transmit size over tx buffer size\n");
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
return 0;
}
if ((unsigned long)skb->data & 3) {
printk("Misaligned SKB\n");
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
return 0;
}
/* Get a descriptor number from our free descriptor queue
We get the descr number from the TCQ now, since I am using
the TCQ as a free buffer queue. Initially TCQ will be
initialized with all the descriptors and is hence, full.
*/
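/*
 * get_desc() returns a 16-bit value where, judging from the masking just
 * below, bits 15:13 carry a completion code and bits 12:0 the descriptor
 * number; e.g. a return of 0x2005 would mean completion code 1 for
 * descriptor 5.
 */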
desc = get_desc (iadev, iavcc);
if (desc == 0xffff)
return 1;
comp_code = desc >> 13;
desc &= 0x1fff;
if ((desc == 0) || (desc > iadev->num_tx_desc))
{
IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
atomic_inc(&vcc->stats->tx);
if (vcc->pop)
vcc->pop(vcc, skb);
else
dev_kfree_skb_any(skb);
return 0; /* return SUCCESS */
}
if (comp_code)
{
IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
desc, comp_code);)
}
/* remember the desc and vcc mapping */
iavcc->vc_desc_cnt++;
iadev->desc_tbl[desc-1].iavcc = iavcc;
iadev->desc_tbl[desc-1].txskb = skb;
IA_SKB_STATE(skb) = 0;
iadev->ffL.tcq_rd += 2;
if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
iadev->ffL.tcq_rd = iadev->ffL.tcq_st;
writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
/* Put the descriptor number in the packet ready queue
and put the updated write pointer in the DLE field
*/
*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
iadev->ffL.prq_wr += 2;
if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
iadev->ffL.prq_wr = iadev->ffL.prq_st;
/* Figure out the exact length of the packet and padding required to
make it aligned on a 48 byte boundary. */
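/* Worked example, assuming the usual 8 byte AAL5 CPCS trailer: a 100 byte
   skb plus the trailer is 108 bytes, which rounds up to 144 bytes, i.e.
   three 48 byte cell payloads. */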
total_len = skb->len + sizeof(struct cpcs_trailer);
total_len = ((total_len + 47) / 48) * 48;
IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
/* Put the packet in a tx buffer */
trailer = iadev->tx_buf[desc-1].cpcs;
IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
skb, skb->data, skb->len, desc);)
trailer->control = 0;
/*big endian*/
trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
trailer->crc32 = 0; /* not needed - dummy bytes */
/* Display the packet */
IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
skb->len, tcnter++);
xdump(skb->data, skb->len, "TX: ");
printk("\n");)
/* Build the buffer descriptor */
buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
buf_desc_ptr += desc; /* points to the corresponding entry */
buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
/* Huh ? p.115 of users guide describes this as a read-only register */
writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
buf_desc_ptr->vc_index = vcc->vci;
buf_desc_ptr->bytes = total_len;
if (vcc->qos.txtp.traffic_class == ATM_ABR)
clear_lockup (vcc, iadev);
/* Build the DLE structure */
wr_ptr = iadev->tx_dle_q.write;
memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
skb->len, PCI_DMA_TODEVICE);
wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
buf_desc_ptr->buf_start_lo;
/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
wr_ptr->bytes = skb->len;
/* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
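/* (bytes >> 2) == 0xb matches any length from 0x2c to 0x2f, so the whole
   problem window is rounded up to 0x30, presumably the next safe size. */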
if ((wr_ptr->bytes >> 2) == 0xb)
wr_ptr->bytes = 0x30;
wr_ptr->mode = TX_DLE_PSI;
wr_ptr->prq_wr_ptr_data = 0;
/* end is not to be used for the DLE q */
if (++wr_ptr == iadev->tx_dle_q.end)
wr_ptr = iadev->tx_dle_q.start;
/* Build trailer dle */
wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
wr_ptr->bytes = sizeof(struct cpcs_trailer);
wr_ptr->mode = DMA_INT_ENABLE;
wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
/* end is not to be used for the DLE q */
if (++wr_ptr == iadev->tx_dle_q.end)
wr_ptr = iadev->tx_dle_q.start;
iadev->tx_dle_q.write = wr_ptr;
ATM_DESC(skb) = vcc->vci;
skb_queue_tail(&iadev->tx_dma_q, skb);
atomic_inc(&vcc->stats->tx);
iadev->tx_pkt_cnt++;
/* Increment transaction counter */
writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
#if 0
/* add flow control logic */
if (atomic_read(&vcc->stats->tx) % 20 == 0) {
if (iavcc->vc_desc_cnt > 10) {
vcc->tx_quota = vcc->tx_quota * 3 / 4;
printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
iavcc->flow_inc = -1;
iavcc->saved_tx_quota = vcc->tx_quota;
} else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
// vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
iavcc->flow_inc = 0;
}
}
#endif
IF_TX(printk("ia send done\n");)
return 0;
}
static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
IADEV *iadev;
unsigned long flags;
iadev = INPH_IA_DEV(vcc->dev);
if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
{
if (!skb)
printk(KERN_CRIT "null skb in ia_send\n");
else dev_kfree_skb_any(skb);
return -EINVAL;
}
spin_lock_irqsave(&iadev->tx_lock, flags);
if (!test_bit(ATM_VF_READY,&vcc->flags)){
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&iadev->tx_lock, flags);
return -EINVAL;
}
ATM_SKB(skb)->vcc = vcc;
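/* Keep packets in order: if a backlog already exists the new skb must
   queue behind it; otherwise try to transmit right away and fall back to
   the backlog if ia_pkt_tx() cannot take it. */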
if (skb_peek(&iadev->tx_backlog)) {
skb_queue_tail(&iadev->tx_backlog, skb);
}
else {
if (ia_pkt_tx (vcc, skb)) {
skb_queue_tail(&iadev->tx_backlog, skb);
}
}
spin_unlock_irqrestore(&iadev->tx_lock, flags);
return 0;
}
static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
int left = *pos, n;
char *tmpPtr;
IADEV *iadev = INPH_IA_DEV(dev);
if(!left--) {
if (iadev->phy_type == FE_25MBIT_PHY) {
n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n");
return n;
}
if (iadev->phy_type == FE_DS3_PHY)
n = sprintf(page, " Board Type : Iphase-ATM-DS3");
else if (iadev->phy_type == FE_E3_PHY)
n = sprintf(page, " Board Type : Iphase-ATM-E3");
else if (iadev->phy_type == FE_UTP_OPTION)
n = sprintf(page, " Board Type : Iphase-ATM-UTP155");
else
n = sprintf(page, " Board Type : Iphase-ATM-OC3");
tmpPtr = page + n;
if (iadev->pci_map_size == 0x40000)
n += sprintf(tmpPtr, "-1KVC-");
else
n += sprintf(tmpPtr, "-4KVC-");
tmpPtr = page + n;
if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
n += sprintf(tmpPtr, "1M \n");
else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
n += sprintf(tmpPtr, "512K\n");
else
n += sprintf(tmpPtr, "128K\n");
return n;
}
if (!left) {
return sprintf(page, " Number of Tx Buffer: %u\n"
" Size of Tx Buffer : %u\n"
" Number of Rx Buffer: %u\n"
" Size of Rx Buffer : %u\n"
" Packets Received  : %u\n"
" Packets Transmitted: %u\n"
" Cells Received : %u\n"
" Cells Transmitted : %u\n"
" Board Dropped Cells: %u\n"
" Board Dropped Pkts : %u\n",
iadev->num_tx_desc, iadev->tx_buf_sz,
iadev->num_rx_desc, iadev->rx_buf_sz,
iadev->rx_pkt_cnt, iadev->tx_pkt_cnt,
iadev->rx_cell_cnt, iadev->tx_cell_cnt,
iadev->drop_rxcell, iadev->drop_rxpkt);
}
return 0;
}
static const struct atmdev_ops ops = {
.open = ia_open,
.close = ia_close,
.ioctl = ia_ioctl,
.getsockopt = ia_getsockopt,
.setsockopt = ia_setsockopt,
.send = ia_send,
.phy_put = ia_phy_put,
.phy_get = ia_phy_get,
.change_qos = ia_change_qos,
.proc_read = ia_proc_read,
.owner = THIS_MODULE,
};
static int __devinit ia_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct atm_dev *dev;
IADEV *iadev;
int ret;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev) {
ret = -ENOMEM;
goto err_out;
}
iadev->pci = pdev;
IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
if (pci_enable_device(pdev)) {
ret = -ENODEV;
goto err_out_free_iadev;
}
dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
if (!dev) {
ret = -ENOMEM;
goto err_out_disable_dev;
}
dev->dev_data = iadev;
IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
iadev->LineRate);)
pci_set_drvdata(pdev, dev);
ia_dev[iadev_count] = iadev;
_ia_dev[iadev_count] = dev;
iadev_count++;
if (ia_init(dev) || ia_start(dev)) {
IF_INIT(printk("IA register failed!\n");)
iadev_count--;
ia_dev[iadev_count] = NULL;
_ia_dev[iadev_count] = NULL;
ret = -EINVAL;
goto err_out_deregister_dev;
}
IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
iadev->next_board = ia_boards;
ia_boards = dev;
return 0;
err_out_deregister_dev:
atm_dev_deregister(dev);
err_out_disable_dev:
pci_disable_device(pdev);
err_out_free_iadev:
kfree(iadev);
err_out:
return ret;
}
static void __devexit ia_remove_one(struct pci_dev *pdev)
{
struct atm_dev *dev = pci_get_drvdata(pdev);
IADEV *iadev = INPH_IA_DEV(dev);
/* Disable phy interrupts */
ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
SUNI_RSOP_CIE);
udelay(1);
if (dev->phy && dev->phy->stop)
dev->phy->stop(dev);
/* De-register device */
free_irq(iadev->irq, dev);
iadev_count--;
ia_dev[iadev_count] = NULL;
_ia_dev[iadev_count] = NULL;
IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
atm_dev_deregister(dev);
iounmap(iadev->base);
pci_disable_device(pdev);
ia_free_rx(iadev);
ia_free_tx(iadev);
kfree(iadev);
}
static struct pci_device_id ia_pci_tbl[] = {
{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
static struct pci_driver ia_driver = {
.name = DEV_LABEL,
.id_table = ia_pci_tbl,
.probe = ia_init_one,
.remove = __devexit_p(ia_remove_one),
};
static int __init ia_module_init(void)
{
int ret;
ret = pci_register_driver(&ia_driver);
if (ret >= 0) {
ia_timer.expires = jiffies + 3*HZ;
add_timer(&ia_timer);
} else
printk(KERN_ERR DEV_LABEL ": no adapter found\n");
return ret;
}
static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);
del_timer(&ia_timer);
}
module_init(ia_module_init);
module_exit(ia_module_exit);
| gpl-2.0 |
stev47/linux | drivers/input/ff-core.c | 4858 | 9133 | /*
* Force feedback support for Linux input subsystem
*
* Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com>
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
* Check that the effect_id is a valid effect and whether the user
* is the owner
*/
static int check_effect_access(struct ff_device *ff, int effect_id,
struct file *file)
{
if (effect_id < 0 || effect_id >= ff->max_effects ||
!ff->effect_owners[effect_id])
return -EINVAL;
if (file && ff->effect_owners[effect_id] != file)
return -EACCES;
return 0;
}
/*
* Checks whether 2 effects can be combined together
*/
static inline int check_effects_compatible(struct ff_effect *e1,
struct ff_effect *e2)
{
return e1->type == e2->type &&
(e1->type != FF_PERIODIC ||
e1->u.periodic.waveform == e2->u.periodic.waveform);
}
/*
* Convert an effect into compatible one
*/
static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
{
int magnitude;
switch (effect->type) {
case FF_RUMBLE:
if (!test_bit(FF_PERIODIC, ff->ffbit))
return -EINVAL;
/*
* calculate magnitude of sine wave as average of rumble's
* 2/3 of strong magnitude and 1/3 of weak magnitude
*/
magnitude = effect->u.rumble.strong_magnitude / 3 +
effect->u.rumble.weak_magnitude / 6;
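/*
 * e.g. strong_magnitude = weak_magnitude = 0x8000 gives
 * 0x2aaa + 0x1555 = 0x3fff.
 */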
effect->type = FF_PERIODIC;
effect->u.periodic.waveform = FF_SINE;
effect->u.periodic.period = 50;
effect->u.periodic.magnitude = max(magnitude, 0x7fff);
effect->u.periodic.offset = 0;
effect->u.periodic.phase = 0;
effect->u.periodic.envelope.attack_length = 0;
effect->u.periodic.envelope.attack_level = 0;
effect->u.periodic.envelope.fade_length = 0;
effect->u.periodic.envelope.fade_level = 0;
return 0;
default:
/* Let driver handle conversion */
return 0;
}
}
/**
* input_ff_upload() - upload effect into force-feedback device
* @dev: input device
* @effect: effect to be uploaded
* @file: owner of the effect
*/
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
struct file *file)
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
int ret = 0;
int id;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
!test_bit(effect->type, dev->ffbit)) {
pr_debug("invalid or not supported effect type in upload\n");
return -EINVAL;
}
if (effect->type == FF_PERIODIC &&
(effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
!test_bit(effect->u.periodic.waveform, dev->ffbit))) {
pr_debug("invalid or not supported wave form in upload\n");
return -EINVAL;
}
if (!test_bit(effect->type, ff->ffbit)) {
ret = compat_effect(ff, effect);
if (ret)
return ret;
}
mutex_lock(&ff->mutex);
if (effect->id == -1) {
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
if (id >= ff->max_effects) {
ret = -ENOSPC;
goto out;
}
effect->id = id;
old = NULL;
} else {
id = effect->id;
ret = check_effect_access(ff, id, file);
if (ret)
goto out;
old = &ff->effects[id];
if (!check_effects_compatible(effect, old)) {
ret = -EINVAL;
goto out;
}
}
ret = ff->upload(dev, effect, old);
if (ret)
goto out;
spin_lock_irq(&dev->event_lock);
ff->effects[id] = *effect;
ff->effect_owners[id] = file;
spin_unlock_irq(&dev->event_lock);
out:
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
/*
* Erases the effect if the requester is also the effect owner. The mutex
* should already be locked before calling this function.
*/
static int erase_effect(struct input_dev *dev, int effect_id,
struct file *file)
{
struct ff_device *ff = dev->ff;
int error;
error = check_effect_access(ff, effect_id, file);
if (error)
return error;
spin_lock_irq(&dev->event_lock);
ff->playback(dev, effect_id, 0);
ff->effect_owners[effect_id] = NULL;
spin_unlock_irq(&dev->event_lock);
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
spin_lock_irq(&dev->event_lock);
ff->effect_owners[effect_id] = file;
spin_unlock_irq(&dev->event_lock);
return error;
}
}
return 0;
}
/**
* input_ff_erase - erase a force-feedback effect from device
* @dev: input device to erase effect from
* @effect_id: id of the effect to be erased
* @file: purported owner of the request
*
* This function erases a force-feedback effect from specified device.
* The effect will only be erased if it was uploaded through the same
* file handle that is requesting erase.
*/
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
mutex_lock(&ff->mutex);
ret = erase_effect(dev, effect_id, file);
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
* flush_effects - erase all effects owned by a file handle
*/
static int flush_effects(struct input_dev *dev, struct file *file)
{
struct ff_device *ff = dev->ff;
int i;
pr_debug("flushing now\n");
mutex_lock(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
mutex_unlock(&ff->mutex);
return 0;
}
/**
* input_ff_event() - generic handler for force-feedback events
* @dev: input device to send the effect to
* @type: event type (anything but EV_FF is ignored)
* @code: event code
* @value: event value
*/
int input_ff_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
struct ff_device *ff = dev->ff;
if (type != EV_FF)
return 0;
switch (code) {
case FF_GAIN:
if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffff)
break;
ff->set_gain(dev, value);
break;
case FF_AUTOCENTER:
if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffff)
break;
ff->set_autocenter(dev, value);
break;
default:
if (check_effect_access(ff, code, NULL) == 0)
ff->playback(dev, code, value);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_event);
/**
* input_ff_create() - create force-feedback device
* @dev: input device supporting force-feedback
* @max_effects: maximum number of effects supported by the device
*
* This function allocates all necessary memory for a force feedback
* portion of an input device and installs all default handlers.
* @dev->ffbit should be already set up before calling this function.
* Once ff device is created you need to setup its upload, erase,
* playback and other handlers before registering input device
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
struct ff_device *ff;
size_t ff_dev_size;
int i;
if (!max_effects) {
pr_err("cannot allocate device without any effects\n");
return -EINVAL;
}
ff_dev_size = sizeof(struct ff_device) +
max_effects * sizeof(struct file *);
if (ff_dev_size < max_effects) /* overflow */
return -EINVAL;
ff = kzalloc(ff_dev_size, GFP_KERNEL);
if (!ff)
return -ENOMEM;
ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
GFP_KERNEL);
if (!ff->effects) {
kfree(ff);
return -ENOMEM;
}
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
dev->ff = ff;
dev->flush = flush_effects;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
/* Copy "true" bits into ff device bitmap */
for (i = 0; i <= FF_MAX; i++)
if (test_bit(i, dev->ffbit))
__set_bit(i, ff->ffbit);
/* we can emulate RUMBLE with periodic effects */
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
/**
* input_ff_destroy() - frees force feedback portion of input device
* @dev: input device supporting force feedback
*
* This function is only needed in error path as input core will
* automatically free force feedback structures when device is
* destroyed.
*/
void input_ff_destroy(struct input_dev *dev)
{
struct ff_device *ff = dev->ff;
__clear_bit(EV_FF, dev->evbit);
if (ff) {
if (ff->destroy)
ff->destroy(ff);
kfree(ff->private);
kfree(ff->effects);
kfree(ff);
dev->ff = NULL;
}
}
EXPORT_SYMBOL_GPL(input_ff_destroy);
| gpl-2.0 |
ktoonsez/KTManta | drivers/input/ff-core.c | 4858 | 9133 | /*
* Force feedback support for Linux input subsystem
*
* Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com>
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define DEBUG */
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
* Check that the effect_id is a valid effect and whether the user
* is the owner
*/
static int check_effect_access(struct ff_device *ff, int effect_id,
struct file *file)
{
if (effect_id < 0 || effect_id >= ff->max_effects ||
!ff->effect_owners[effect_id])
return -EINVAL;
if (file && ff->effect_owners[effect_id] != file)
return -EACCES;
return 0;
}
/*
* Checks whether 2 effects can be combined together
*/
static inline int check_effects_compatible(struct ff_effect *e1,
struct ff_effect *e2)
{
return e1->type == e2->type &&
(e1->type != FF_PERIODIC ||
e1->u.periodic.waveform == e2->u.periodic.waveform);
}
/*
* Convert an effect into compatible one
*/
static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
{
int magnitude;
switch (effect->type) {
case FF_RUMBLE:
if (!test_bit(FF_PERIODIC, ff->ffbit))
return -EINVAL;
/*
* calculate magnitude of sine wave as average of rumble's
* 2/3 of strong magnitude and 1/3 of weak magnitude
*/
magnitude = effect->u.rumble.strong_magnitude / 3 +
effect->u.rumble.weak_magnitude / 6;
effect->type = FF_PERIODIC;
effect->u.periodic.waveform = FF_SINE;
effect->u.periodic.period = 50;
effect->u.periodic.magnitude = max(magnitude, 0x7fff);
effect->u.periodic.offset = 0;
effect->u.periodic.phase = 0;
effect->u.periodic.envelope.attack_length = 0;
effect->u.periodic.envelope.attack_level = 0;
effect->u.periodic.envelope.fade_length = 0;
effect->u.periodic.envelope.fade_level = 0;
return 0;
default:
/* Let driver handle conversion */
return 0;
}
}
/**
* input_ff_upload() - upload effect into force-feedback device
* @dev: input device
* @effect: effect to be uploaded
* @file: owner of the effect
*/
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
struct file *file)
{
struct ff_device *ff = dev->ff;
struct ff_effect *old;
int ret = 0;
int id;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
!test_bit(effect->type, dev->ffbit)) {
pr_debug("invalid or not supported effect type in upload\n");
return -EINVAL;
}
if (effect->type == FF_PERIODIC &&
(effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
!test_bit(effect->u.periodic.waveform, dev->ffbit))) {
pr_debug("invalid or not supported wave form in upload\n");
return -EINVAL;
}
if (!test_bit(effect->type, ff->ffbit)) {
ret = compat_effect(ff, effect);
if (ret)
return ret;
}
mutex_lock(&ff->mutex);
if (effect->id == -1) {
for (id = 0; id < ff->max_effects; id++)
if (!ff->effect_owners[id])
break;
if (id >= ff->max_effects) {
ret = -ENOSPC;
goto out;
}
effect->id = id;
old = NULL;
} else {
id = effect->id;
ret = check_effect_access(ff, id, file);
if (ret)
goto out;
old = &ff->effects[id];
if (!check_effects_compatible(effect, old)) {
ret = -EINVAL;
goto out;
}
}
ret = ff->upload(dev, effect, old);
if (ret)
goto out;
spin_lock_irq(&dev->event_lock);
ff->effects[id] = *effect;
ff->effect_owners[id] = file;
spin_unlock_irq(&dev->event_lock);
out:
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
/*
* Erases the effect if the requester is also the effect owner. The mutex
* should already be locked before calling this function.
*/
static int erase_effect(struct input_dev *dev, int effect_id,
struct file *file)
{
struct ff_device *ff = dev->ff;
int error;
error = check_effect_access(ff, effect_id, file);
if (error)
return error;
spin_lock_irq(&dev->event_lock);
ff->playback(dev, effect_id, 0);
ff->effect_owners[effect_id] = NULL;
spin_unlock_irq(&dev->event_lock);
if (ff->erase) {
error = ff->erase(dev, effect_id);
if (error) {
spin_lock_irq(&dev->event_lock);
ff->effect_owners[effect_id] = file;
spin_unlock_irq(&dev->event_lock);
return error;
}
}
return 0;
}
/**
* input_ff_erase - erase a force-feedback effect from device
* @dev: input device to erase effect from
* @effect_id: id of the effect to be erased
* @file: purported owner of the request
*
* This function erases a force-feedback effect from specified device.
* The effect will only be erased if it was uploaded through the same
* file handle that is requesting erase.
*/
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
mutex_lock(&ff->mutex);
ret = erase_effect(dev, effect_id, file);
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
* flush_effects - erase all effects owned by a file handle
*/
static int flush_effects(struct input_dev *dev, struct file *file)
{
struct ff_device *ff = dev->ff;
int i;
pr_debug("flushing now\n");
mutex_lock(&ff->mutex);
for (i = 0; i < ff->max_effects; i++)
erase_effect(dev, i, file);
mutex_unlock(&ff->mutex);
return 0;
}
/**
* input_ff_event() - generic handler for force-feedback events
* @dev: input device to send the effect to
* @type: event type (anything but EV_FF is ignored)
* @code: event code
* @value: event value
*/
int input_ff_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
struct ff_device *ff = dev->ff;
if (type != EV_FF)
return 0;
switch (code) {
case FF_GAIN:
if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffff)
break;
ff->set_gain(dev, value);
break;
case FF_AUTOCENTER:
if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffff)
break;
ff->set_autocenter(dev, value);
break;
default:
if (check_effect_access(ff, code, NULL) == 0)
ff->playback(dev, code, value);
break;
}
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_event);
/**
* input_ff_create() - create force-feedback device
* @dev: input device supporting force-feedback
* @max_effects: maximum number of effects supported by the device
*
* This function allocates all necessary memory for a force feedback
* portion of an input device and installs all default handlers.
* @dev->ffbit should be already set up before calling this function.
* Once ff device is created you need to setup its upload, erase,
* playback and other handlers before registering input device
*/
int input_ff_create(struct input_dev *dev, unsigned int max_effects)
{
struct ff_device *ff;
size_t ff_dev_size;
int i;
if (!max_effects) {
pr_err("cannot allocate device without any effects\n");
return -EINVAL;
}
ff_dev_size = sizeof(struct ff_device) +
max_effects * sizeof(struct file *);
if (ff_dev_size < max_effects) /* overflow */
return -EINVAL;
ff = kzalloc(ff_dev_size, GFP_KERNEL);
if (!ff)
return -ENOMEM;
ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
GFP_KERNEL);
if (!ff->effects) {
kfree(ff);
return -ENOMEM;
}
ff->max_effects = max_effects;
mutex_init(&ff->mutex);
dev->ff = ff;
dev->flush = flush_effects;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
/* Copy "true" bits into ff device bitmap */
for (i = 0; i <= FF_MAX; i++)
if (test_bit(i, dev->ffbit))
__set_bit(i, ff->ffbit);
/* we can emulate RUMBLE with periodic effects */
if (test_bit(FF_PERIODIC, ff->ffbit))
__set_bit(FF_RUMBLE, dev->ffbit);
return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
/**
* input_ff_destroy() - frees force feedback portion of input device
* @dev: input device supporting force feedback
*
* This function is only needed in error path as input core will
* automatically free force feedback structures when device is
* destroyed.
*/
void input_ff_destroy(struct input_dev *dev)
{
struct ff_device *ff = dev->ff;
__clear_bit(EV_FF, dev->evbit);
if (ff) {
if (ff->destroy)
ff->destroy(ff);
kfree(ff->private);
kfree(ff->effects);
kfree(ff);
dev->ff = NULL;
}
}
EXPORT_SYMBOL_GPL(input_ff_destroy);
| gpl-2.0 |
caplio/android_kernel_samsung_hltedcm | drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 10490 | 3865 | /*
* Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
*
* Copyright (C) 2007 Domen Puncer, Telargo, Inc.
* Copyright (C) 2008 Wolfram Sang, Pengutronix
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include "fec_mpc52xx.h"
struct mpc52xx_fec_mdio_priv {
struct mpc52xx_fec __iomem *regs;
int mdio_irqs[PHY_MAX_ADDR];
};
static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
int reg, u32 value)
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
struct mpc52xx_fec __iomem *fec = priv->regs;
int tries = 3;
value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
out_be32(&fec->ievent, FEC_IEVENT_MII);
out_be32(&fec->mii_data, value);
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
msleep(1);
if (!tries)
return -ETIMEDOUT;
return value & FEC_MII_DATA_OP_RD ?
in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK : 0;
}
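/*
 * For read frames mii_data holds the value returned by the PHY, so the
 * transfer helper hands that back; write frames simply return 0.  The
 * read/write wrappers below differ only in the frame type they pass in.
 */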
static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
}
static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 data)
{
return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
data | FEC_MII_WRITE_FRAME);
}
static int mpc52xx_fec_mdio_probe(struct platform_device *of)
{
struct device *dev = &of->dev;
struct device_node *np = of->dev.of_node;
struct mii_bus *bus;
struct mpc52xx_fec_mdio_priv *priv;
struct resource res;
int err;
bus = mdiobus_alloc();
if (bus == NULL)
return -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
err = -ENOMEM;
goto out_free;
}
bus->name = "mpc52xx MII bus";
bus->read = mpc52xx_fec_mdio_read;
bus->write = mpc52xx_fec_mdio_write;
/* setup irqs */
bus->irq = priv->mdio_irqs;
/* setup registers */
err = of_address_to_resource(np, 0, &res);
if (err)
goto out_free;
priv->regs = ioremap(res.start, resource_size(&res));
if (priv->regs == NULL) {
err = -ENOMEM;
goto out_free;
}
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
bus->priv = priv;
bus->parent = dev;
dev_set_drvdata(dev, bus);
/* set MII speed */
out_be32(&priv->regs->mii_speed,
((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
goto out_unmap;
return 0;
out_unmap:
iounmap(priv->regs);
out_free:
kfree(priv);
mdiobus_free(bus);
return err;
}
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
struct device *dev = &of->dev;
struct mii_bus *bus = dev_get_drvdata(dev);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
dev_set_drvdata(dev, NULL);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
return 0;
}
static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "fsl,mpc5200b-mdio", },
{ .compatible = "fsl,mpc5200-mdio", },
{ .compatible = "mpc5200b-fec-phy", },
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct platform_driver mpc52xx_fec_mdio_driver = {
.driver = {
.name = "mpc5200b-fec-phy",
.owner = THIS_MODULE,
.of_match_table = mpc52xx_fec_mdio_match,
},
.probe = mpc52xx_fec_mdio_probe,
.remove = mpc52xx_fec_mdio_remove,
};
/* let fec driver call it, since this has to be registered before it */
EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
x456/linux | arch/arm/kernel/swp_emulate.c | 251 | 7204 | /*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
*
* Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
* Where: Rt = destination
* Rt2 = source
* Rn = address
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>
#include <asm/opcodes.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
/*
* Error-checking SWP macros implemented using ldrex{b}/strex{b}
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .text.fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 0b, 3b\n" \
" .long 1b, 3b\n" \
" .previous" \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "cc", "memory")
#define __user_swp_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "b")
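/*
 * If the store-exclusive fails (the location was touched between the
 * ldrex and the strex) the macro leaves -EAGAIN in res and the caller is
 * expected to retry; a faulting access jumps to the fixup at label 3 and
 * yields -EFAULT instead.
 */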
/*
* Macros/defines for extracting register numbers from instruction.
*/
#define EXTRACT_REG_NUM(instruction, offset) \
(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET 16
#define RT_OFFSET 12
#define RT2_OFFSET 0
/*
* Bit 22 of the instruction encoding distinguishes between
* the SWP and SWPB variants (bit set means SWPB).
*/
#define TYPE_SWPB (1 << 22)
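/*
 * Example encoding: "swp r0, r1, [r2]" assembles to 0xe1020091, i.e.
 * Rn=2 in bits 19:16, Rt=0 in bits 15:12 and Rt2=1 in bits 3:0, with
 * bit 22 clear for the word-sized variant.
 */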
static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t previous_pid;
#ifdef CONFIG_PROC_FS
static int proc_status_show(struct seq_file *m, void *v)
{
seq_printf(m, "Emulated SWP:\t\t%lu\n", swpcounter);
seq_printf(m, "Emulated SWPB:\t\t%lu\n", swpbcounter);
seq_printf(m, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
if (previous_pid != 0)
seq_printf(m, "Last process:\t\t%d\n", previous_pid);
return 0;
}
static int proc_status_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_status_show, PDE_DATA(inode));
}
static const struct file_operations proc_status_fops = {
.open = proc_status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
/*
* Set up process info to signal segmentation fault - called on access error.
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
down_read(¤t->mm->mmap_sem);
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
up_read(¤t->mm->mmap_sem);
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) instruction_pointer(regs);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs, &info, 0, 0);
abtcounter++;
}
static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
unsigned int res = 0;
if ((type != TYPE_SWPB) && (address & 0x3)) {
/* SWP to unaligned address not permitted */
pr_debug("SWP instruction on unaligned pointer!\n");
return -EFAULT;
}
while (1) {
unsigned long temp;
unsigned int __ua_flags;
__ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
cond_resched();
}
if (res == 0) {
if (type == TYPE_SWPB)
swpbcounter++;
else
swpcounter++;
}
return res;
}
/*
* swp_handler logs the id of calling process, dissects the instruction, sanity
* checks the memory location, calls emulate_swpX for the actual operation and
* deals with fixup/error handling before returning
*/
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
res = arm_check_condition(instr, regs->ARM_cpsr);
switch (res) {
case ARM_OPCODE_CONDTEST_PASS:
break;
case ARM_OPCODE_CONDTEST_FAIL:
/* Condition failed - return to next instruction */
regs->ARM_pc += 4;
return 0;
case ARM_OPCODE_CONDTEST_UNCOND:
/* If unconditional encoding - not a SWP, undef */
return -EFAULT;
default:
return -EINVAL;
}
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
}
/*
* Only emulate SWP/SWPB executed in ARM state/User mode.
* The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
*/
static struct undef_hook swp_hook = {
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
.cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
.cpsr_val = USR_MODE,
.fn = swp_handler
};
/*
* Register handler and create status file in /proc/cpu
* Invoked as late_initcall, since not needed before init spawned.
*/
static int __init swp_emulation_init(void)
{
if (cpu_architecture() < CPU_ARCH_ARMv7)
return 0;
#ifdef CONFIG_PROC_FS
if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops))
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
pr_notice("Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);
return 0;
}
late_initcall(swp_emulation_init);
| gpl-2.0 |
wanghao-xznu/linux-2.6.32.9-hiveboard_130326 | arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 507 | 11773 | /*
* SH7366 Setup
*
* Copyright (C) 2008 Renesas Solutions
*
* Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/uio_driver.h>
#include <linux/sh_timer.h>
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
.start = 0x04470000,
.end = 0x04470017,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 96,
.end = 99,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device iic_device = {
.name = "i2c-sh_mobile",
.id = 0, /* "i2c0" clock */
.num_resources = ARRAY_SIZE(iic_resources),
.resource = iic_resources,
};
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
};
static struct resource usb_host_resources[] = {
[0] = {
.start = 0xa4d80000,
.end = 0xa4d800ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 65,
.end = 65,
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device usb_host_device = {
.name = "r8a66597_hcd",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(usb_host_resources),
.resource = usb_host_resources,
};
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
.irq = 60,
};
static struct resource vpu_resources[] = {
[0] = {
.name = "VPU",
.start = 0xfe900000,
.end = 0xfe902807,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device vpu_device = {
.name = "uio_pdrv_genirq",
.id = 0,
.dev = {
.platform_data = &vpu_platform_data,
},
.resource = vpu_resources,
.num_resources = ARRAY_SIZE(vpu_resources),
};
static struct uio_info veu0_platform_data = {
.name = "VEU",
.version = "0",
.irq = 54,
};
static struct resource veu0_resources[] = {
[0] = {
.name = "VEU(1)",
.start = 0xfe920000,
.end = 0xfe9200b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu0_device = {
.name = "uio_pdrv_genirq",
.id = 1,
.dev = {
.platform_data = &veu0_platform_data,
},
.resource = veu0_resources,
.num_resources = ARRAY_SIZE(veu0_resources),
};
static struct uio_info veu1_platform_data = {
.name = "VEU",
.version = "0",
.irq = 27,
};
static struct resource veu1_resources[] = {
[0] = {
.name = "VEU(2)",
.start = 0xfe924000,
.end = 0xfe9240b7,
.flags = IORESOURCE_MEM,
},
[1] = {
/* place holder for contiguous memory */
},
};
static struct platform_device veu1_device = {
.name = "uio_pdrv_genirq",
.id = 2,
.dev = {
.platform_data = &veu1_platform_data,
},
.resource = veu1_resources,
.num_resources = ARRAY_SIZE(veu1_resources),
};
static struct sh_timer_config cmt_platform_data = {
.name = "CMT",
.channel_offset = 0x60,
.timer_bit = 5,
.clk = "cmt0",
.clockevent_rating = 125,
.clocksource_rating = 200,
};
static struct resource cmt_resources[] = {
[0] = {
.name = "CMT",
.start = 0x044a0060,
.end = 0x044a006b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 104,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cmt_device = {
.name = "sh_cmt",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
.timer_bit = 0,
.clk = "tmu0",
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
.name = "TMU0",
.start = 0xffd80008,
.end = 0xffd80013,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 16,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu0_device = {
.name = "sh_tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.name = "TMU1",
.channel_offset = 0x10,
.timer_bit = 1,
.clk = "tmu0",
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
.name = "TMU1",
.start = 0xffd80014,
.end = 0xffd8001f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 17,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu1_device = {
.name = "sh_tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.name = "TMU2",
.channel_offset = 0x1c,
.timer_bit = 2,
.clk = "tmu0",
};
static struct resource tmu2_resources[] = {
[0] = {
.name = "TMU2",
.start = 0xffd80020,
.end = 0xffd8002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 18,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu2_device = {
.name = "sh_tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct plat_sci_port sci_platform_data[] = {
{
.mapbase = 0xffe00000,
.flags = UPF_BOOT_AUTOCONF,
.type = PORT_SCIF,
.irqs = { 80, 80, 80, 80 },
.clk = "scif0",
}, {
.flags = 0,
}
};
static struct platform_device sci_device = {
.name = "sh-sci",
.id = -1,
.dev = {
.platform_data = sci_platform_data,
},
};
static struct platform_device *sh7366_devices[] __initdata = {
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic_device,
&sci_device,
&usb_host_device,
&vpu_device,
&veu0_device,
&veu1_device,
};
static int __init sh7366_devices_setup(void)
{
platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
return platform_add_devices(sh7366_devices,
ARRAY_SIZE(sh7366_devices));
}
arch_initcall(sh7366_devices_setup);
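/*
 * Note on the "2 << 20" arguments above: platform_resource_setup_memory()
 * is expected to fill the empty second resource of each UIO device (the
 * "place holder for contiguous memory" entries) with a contiguous buffer
 * of the requested size, here 2 << 20 = 2 MiB per device, which userspace
 * can then map through the uio_pdrv_genirq driver.
 */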
static struct platform_device *sh7366_early_devices[] __initdata = {
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
void __init plat_early_device_setup(void)
{
early_platform_add_devices(sh7366_early_devices,
ARRAY_SIZE(sh7366_early_devices));
}
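/*
 * The CMT/TMU devices above are also registered as early platform
 * devices so that a system timer can be probed (e.g. via the
 * "earlytimer" early param handling on SH) before regular initcalls
 * run; the same platform_device structures are registered again
 * normally from sh7366_devices_setup().
 */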
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
ICB,
DMAC0, DMAC1, DMAC2, DMAC3,
VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
MFI, VPU, USB,
MMC_MMC1I, MMC_MMC2I, MMC_MMC3I,
DMAC4, DMAC5, DMAC_DADERR,
SCIF, SCIFA1, SCIFA2,
DENC, MSIOF,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
SDHI0, SDHI1, SDHI2, SDHI3,
CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
VEU2, LCDC,
/* interrupt groups */
DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
INTC_VECT(ICB, 0x700),
INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20),
INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20),
INTC_VECT(MMC_MMC3I, 0xb40),
INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
INTC_VECT(DMAC_DADERR, 0xbc0),
INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20),
INTC_VECT(SCIFA2, 0xc40),
INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80),
INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I),
INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
{ } },
{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
{ VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
{ 0, 0, 0, VPU, 0, 0, 0, MFI } },
{ 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
{ 0, 0, 0, ICB } },
{ 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
{ 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } },
{ 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
{ 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } },
{ 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
{ 0, 0, 0, 0, 0, 0, 0, MSIOF } },
{ 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
{ SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
{ 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } },
{ 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
{ 0, 0, 0, 0, 0, 0, 0, TSIF } },
{ 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
{ 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } },
{ 0xa4080008, 0, 16, 4, /* IPRC */ { } },
{ 0xa408000c, 0, 16, 4, /* IPRD */ { } },
{ 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
{ 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } },
{ 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } },
{ 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } },
{ 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } },
{ 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } },
{ 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
{ 0xa408002c, 0, 16, 4, /* IPRL */ { } },
{ 0xa4140010, 0, 32, 4, /* INTPRI00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa414001c, 16, 2, /* ICR1 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4140024, 0, 8, /* INTREQ00 */
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
mask_registers, prio_registers, sense_registers,
ack_registers);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
void __init plat_mem_setup(void)
{
/* TODO: Register Node 1 */
}
| gpl-2.0 |
pershoot/android_kernel_samsung_p4 | drivers/acpi/battery.c | 507 | 32056 | /*
* battery.c - ACPI Battery Driver (Revision: 2.0)
*
* Copyright (C) 2007 Alexey Starikovskiy <astarikovskiy@suse.de>
* Copyright (C) 2004-2007 Vladimir Lebedev <vladimir.p.lebedev@intel.com>
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/async.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#endif
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/power_supply.h>
#define PREFIX "ACPI: "
#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CLASS "battery"
#define ACPI_BATTERY_DEVICE_NAME "Battery"
#define ACPI_BATTERY_NOTIFY_STATUS 0x80
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
/* Battery power unit: 0 means mW, 1 means mA */
#define ACPI_BATTERY_POWER_UNIT_MA 1
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
MODULE_LICENSE("GPL");
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
#ifdef CONFIG_ACPI_PROCFS_POWER
extern struct proc_dir_entry *acpi_lock_battery_dir(void);
extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
enum acpi_battery_files {
info_tag = 0,
state_tag,
alarm_tag,
ACPI_BATTERY_NUMFILES,
};
#endif
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, battery_device_ids);
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
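/*
 * Rough meaning of the flag bits above: ALARM_PRESENT is set when the
 * firmware implements _BTP (the trip-point alarm), XINFO_PRESENT when
 * extended battery info via _BIX is available instead of plain _BIF,
 * and QUIRK_PERCENTAGE_CAPACITY marks firmware that reports capacity
 * as a percentage rather than in mWh/mAh (see acpi_battery_quirks()).
 */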
struct acpi_battery {
struct mutex lock;
struct mutex sysfs_lock;
struct power_supply bat;
struct acpi_device *device;
struct notifier_block pm_nb;
unsigned long update_time;
int rate_now;
int capacity_now;
int voltage_now;
int design_capacity;
int full_charge_capacity;
int technology;
int design_voltage;
int design_capacity_warning;
int design_capacity_low;
int cycle_count;
int measurement_accuracy;
int max_sampling_time;
int min_sampling_time;
int max_averaging_interval;
int min_averaging_interval;
int capacity_granularity_1;
int capacity_granularity_2;
int alarm;
char model_number[32];
char serial_number[32];
char type[32];
char oem_info[32];
int state;
int power_unit;
unsigned long flags;
};
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
inline int acpi_battery_present(struct acpi_battery *battery)
{
return battery->device->status.battery_present;
}
static int acpi_battery_technology(struct acpi_battery *battery)
{
if (!strcasecmp("NiCd", battery->type))
return POWER_SUPPLY_TECHNOLOGY_NiCd;
if (!strcasecmp("NiMH", battery->type))
return POWER_SUPPLY_TECHNOLOGY_NiMH;
if (!strcasecmp("LION", battery->type))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strncasecmp("LI-ION", battery->type, 6))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strcasecmp("LiP", battery->type))
return POWER_SUPPLY_TECHNOLOGY_LIPO;
return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
}
static int acpi_battery_get_state(struct acpi_battery *battery);
static int acpi_battery_is_charged(struct acpi_battery *battery)
{
/* either charging or discharging */
if (battery->state != 0)
return 0;
/* battery not reporting charge */
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
battery->capacity_now == 0)
return 0;
/* good batteries update full_charge as the batteries degrade */
if (battery->full_charge_capacity == battery->capacity_now)
return 1;
/* fallback to using design values for broken batteries */
if (battery->design_capacity == battery->capacity_now)
return 1;
/* we don't do any sort of metric based on percentages */
return 0;
}
static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_present(battery)) {
/* run battery update only if it is present */
acpi_battery_get_state(battery);
} else if (psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->state & 0x01)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
else if (battery->state & 0x02)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = acpi_battery_present(battery);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = acpi_battery_technology(battery);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
val->intval = battery->cycle_count;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->design_voltage * 1000;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->voltage_now * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_ENERGY_NOW:
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
break;
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = battery->oem_info;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
val->strval = battery->serial_number;
break;
default:
ret = -EINVAL;
}
return ret;
}
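/*
 * The "* 1000" scaling above converts the ACPI-reported units (mV,
 * mA/mW, mAh/mWh) into the micro-units (uV, uA/uW, uAh/uWh) that the
 * power_supply class expects for these properties.
 */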
static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_POWER_NOW,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
{
return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
"mA" : "mW";
}
#endif
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
struct acpi_offsets {
size_t offset; /* offset inside struct acpi_battery */
u8 mode; /* int or string? */
};
static struct acpi_offsets state_offsets[] = {
{offsetof(struct acpi_battery, state), 0},
{offsetof(struct acpi_battery, rate_now), 0},
{offsetof(struct acpi_battery, capacity_now), 0},
{offsetof(struct acpi_battery, voltage_now), 0},
};
static struct acpi_offsets info_offsets[] = {
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
{offsetof(struct acpi_battery, technology), 0},
{offsetof(struct acpi_battery, design_voltage), 0},
{offsetof(struct acpi_battery, design_capacity_warning), 0},
{offsetof(struct acpi_battery, design_capacity_low), 0},
{offsetof(struct acpi_battery, capacity_granularity_1), 0},
{offsetof(struct acpi_battery, capacity_granularity_2), 0},
{offsetof(struct acpi_battery, model_number), 1},
{offsetof(struct acpi_battery, serial_number), 1},
{offsetof(struct acpi_battery, type), 1},
{offsetof(struct acpi_battery, oem_info), 1},
};
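/*
 * For illustration, a _BIF return object is a package whose elements
 * follow exactly the order of info_offsets[] above, roughly:
 *
 * Package () { PowerUnit, DesignCapacity, LastFullChargeCapacity,
 * Technology, DesignVoltage, DesignCapacityWarning,
 * DesignCapacityLow, Granularity1, Granularity2,
 * "Model", "Serial", "Type", "OEM" }
 *
 * _BIX adds the cycle count and measurement/sampling fields, matching
 * extended_info_offsets[] below. extract_package() walks such a
 * package element by element and stores each value into the struct
 * acpi_battery field at the corresponding offset (mode 0 = integer,
 * mode 1 = string).
 */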
static struct acpi_offsets extended_info_offsets[] = {
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
{offsetof(struct acpi_battery, technology), 0},
{offsetof(struct acpi_battery, design_voltage), 0},
{offsetof(struct acpi_battery, design_capacity_warning), 0},
{offsetof(struct acpi_battery, design_capacity_low), 0},
{offsetof(struct acpi_battery, cycle_count), 0},
{offsetof(struct acpi_battery, measurement_accuracy), 0},
{offsetof(struct acpi_battery, max_sampling_time), 0},
{offsetof(struct acpi_battery, min_sampling_time), 0},
{offsetof(struct acpi_battery, max_averaging_interval), 0},
{offsetof(struct acpi_battery, min_averaging_interval), 0},
{offsetof(struct acpi_battery, capacity_granularity_1), 0},
{offsetof(struct acpi_battery, capacity_granularity_2), 0},
{offsetof(struct acpi_battery, model_number), 1},
{offsetof(struct acpi_battery, serial_number), 1},
{offsetof(struct acpi_battery, type), 1},
{offsetof(struct acpi_battery, oem_info), 1},
};
static int extract_package(struct acpi_battery *battery,
union acpi_object *package,
struct acpi_offsets *offsets, int num)
{
int i;
union acpi_object *element;
if (package->type != ACPI_TYPE_PACKAGE)
return -EFAULT;
for (i = 0; i < num; ++i) {
if (package->package.count <= i)
return -EFAULT;
element = &package->package.elements[i];
if (offsets[i].mode) {
u8 *ptr = (u8 *)battery + offsets[i].offset;
if (element->type == ACPI_TYPE_STRING ||
element->type == ACPI_TYPE_BUFFER)
strncpy(ptr, element->string.pointer, 32);
else if (element->type == ACPI_TYPE_INTEGER) {
strncpy(ptr, (u8 *)&element->integer.value,
sizeof(u64));
ptr[sizeof(u64)] = 0;
} else
*ptr = 0; /* don't have value */
} else {
int *x = (int *)((u8 *)battery + offsets[i].offset);
*x = (element->type == ACPI_TYPE_INTEGER) ?
element->integer.value : -1;
}
}
return 0;
}
static int acpi_battery_get_status(struct acpi_battery *battery)
{
if (acpi_bus_get_status(battery->device)) {
ACPI_EXCEPTION((AE_INFO, AE_ERROR, "Evaluating _STA"));
return -ENODEV;
}
return 0;
}
static int acpi_battery_get_info(struct acpi_battery *battery)
{
int result = -EFAULT;
acpi_status status = 0;
char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)?
"_BIX" : "_BIF";
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
if (!acpi_battery_present(battery))
return 0;
mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, name,
NULL, &buffer);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
return -ENODEV;
}
if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
result = extract_package(battery, buffer.pointer,
extended_info_offsets,
ARRAY_SIZE(extended_info_offsets));
else
result = extract_package(battery, buffer.pointer,
info_offsets, ARRAY_SIZE(info_offsets));
kfree(buffer.pointer);
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
return result;
}
static int acpi_battery_get_state(struct acpi_battery *battery)
{
int result = 0;
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
if (!acpi_battery_present(battery))
return 0;
if (battery->update_time &&
time_before(jiffies, battery->update_time +
msecs_to_jiffies(cache_time)))
return 0;
mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BST",
NULL, &buffer);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BST"));
return -ENODEV;
}
result = extract_package(battery, buffer.pointer,
state_offsets, ARRAY_SIZE(state_offsets));
battery->update_time = jiffies;
kfree(buffer.pointer);
/* For buggy DSDTs that report negative 16-bit values for either
* charging or discharging current and/or report 0 as 65536
* due to bad math.
*/
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
(s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
" invalid.\n");
}
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
return result;
}
static int acpi_battery_set_alarm(struct acpi_battery *battery)
{
acpi_status status = 0;
union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
struct acpi_object_list arg_list = { 1, &arg0 };
if (!acpi_battery_present(battery) ||
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
arg0.integer.value = battery->alarm;
mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BTP",
&arg_list, NULL);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status))
return -ENODEV;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Alarm set to %d\n", battery->alarm));
return 0;
}
static int acpi_battery_init_alarm(struct acpi_battery *battery)
{
acpi_status status = AE_OK;
acpi_handle handle = NULL;
/* See if alarms are supported, and if so, set default */
status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
if (ACPI_FAILURE(status)) {
clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
return 0;
}
set_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
if (!battery->alarm)
battery->alarm = battery->design_capacity_warning;
return acpi_battery_set_alarm(battery);
}
static ssize_t acpi_battery_alarm_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", battery->alarm * 1000);
}
static ssize_t acpi_battery_alarm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
if (sscanf(buf, "%ld\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
return count;
}
static struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
};
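/*
 * Usage sketch (paths are illustrative, not taken from this file): the
 * attribute above appears as e.g. /sys/class/power_supply/BAT0/alarm.
 * Reading it returns battery->alarm * 1000 (micro-units); writing e.g.
 * "5000000" stores 5000000 / 1000 = 5000 (mWh or mAh) and pushes the
 * new trip point to the firmware via _BTP in acpi_battery_set_alarm().
 */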
static int sysfs_add_battery(struct acpi_battery *battery)
{
int result;
if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
battery->bat.properties = charge_battery_props;
battery->bat.num_properties =
ARRAY_SIZE(charge_battery_props);
} else {
battery->bat.properties = energy_battery_props;
battery->bat.num_properties =
ARRAY_SIZE(energy_battery_props);
}
battery->bat.name = acpi_device_bid(battery->device);
battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
battery->bat.get_property = acpi_battery_get_property;
result = power_supply_register(&battery->device->dev, &battery->bat);
if (result)
return result;
return device_create_file(battery->bat.dev, &alarm_attr);
}
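/*
 * The property set is chosen by reporting unit: batteries that report
 * in mA/mAh expose the CHARGE_* properties, while batteries that
 * report in mW/mWh expose the ENERGY_* properties, so userspace sees
 * whichever family matches the native units of the firmware data.
 */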
static void sysfs_remove_battery(struct acpi_battery *battery)
{
mutex_lock(&battery->sysfs_lock);
if (!battery->bat.dev) {
mutex_unlock(&battery->sysfs_lock);
return;
}
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
mutex_unlock(&battery->sysfs_lock);
}
/*
* According to the ACPI spec, some kinds of primary batteries can
* report the remaining battery capacity as a percentage directly to
* the OS. In this case, the firmware reports
* Last Full Charged Capacity == 100 and BatteryPresentRate == 0xFFFFFFFF.
*
* We have since found batteries that report the remaining capacity as
* a percentage even though they are rechargeable.
* https://bugzilla.kernel.org/show_bug.cgi?id=15979
*
* Handle this correctly so that they won't break userspace.
*/
static void acpi_battery_quirks(struct acpi_battery *battery)
{
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
return;
if (battery->full_charge_capacity == 100 &&
battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN &&
battery->capacity_now >= 0 && battery->capacity_now <= 100) {
set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags);
battery->full_charge_capacity = battery->design_capacity;
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
}
}
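/*
 * Worked example of the quirk above: if the firmware reports
 * full_charge_capacity == 100, rate_now == ACPI_BATTERY_VALUE_UNKNOWN
 * and capacity_now == 87, the values are taken to be percentages;
 * full_charge_capacity is replaced by design_capacity (say 56000 mWh)
 * and capacity_now becomes 87 * 56000 / 100 = 48720 mWh, matching the
 * rescaling acpi_battery_get_state() applies to later readings.
 */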
static int acpi_battery_update(struct acpi_battery *battery)
{
int result, old_present = acpi_battery_present(battery);
result = acpi_battery_get_status(battery);
if (result)
return result;
if (!acpi_battery_present(battery)) {
sysfs_remove_battery(battery);
battery->update_time = 0;
return 0;
}
if (!battery->update_time ||
old_present != acpi_battery_present(battery)) {
result = acpi_battery_get_info(battery);
if (result)
return result;
acpi_battery_init_alarm(battery);
}
if (!battery->bat.dev) {
result = sysfs_add_battery(battery);
if (result)
return result;
}
result = acpi_battery_get_state(battery);
acpi_battery_quirks(battery);
return result;
}
static void acpi_battery_refresh(struct acpi_battery *battery)
{
if (!battery->bat.dev)
return;
acpi_battery_get_info(battery);
/* The battery may have changed its reporting units. */
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS_POWER
static struct proc_dir_entry *acpi_battery_dir;
static int acpi_battery_print_info(struct seq_file *seq, int result)
{
struct acpi_battery *battery = seq->private;
if (result)
goto end;
seq_printf(seq, "present: %s\n",
acpi_battery_present(battery)?"yes":"no");
if (!acpi_battery_present(battery))
goto end;
if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design capacity: unknown\n");
else
seq_printf(seq, "design capacity: %d %sh\n",
battery->design_capacity,
acpi_battery_units(battery));
if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "last full capacity: unknown\n");
else
seq_printf(seq, "last full capacity: %d %sh\n",
battery->full_charge_capacity,
acpi_battery_units(battery));
seq_printf(seq, "battery technology: %srechargeable\n",
(!battery->technology)?"non-":"");
if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "design voltage: unknown\n");
else
seq_printf(seq, "design voltage: %d mV\n",
battery->design_voltage);
seq_printf(seq, "design capacity warning: %d %sh\n",
battery->design_capacity_warning,
acpi_battery_units(battery));
seq_printf(seq, "design capacity low: %d %sh\n",
battery->design_capacity_low,
acpi_battery_units(battery));
seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
seq_printf(seq, "capacity granularity 1: %d %sh\n",
battery->capacity_granularity_1,
acpi_battery_units(battery));
seq_printf(seq, "capacity granularity 2: %d %sh\n",
battery->capacity_granularity_2,
acpi_battery_units(battery));
seq_printf(seq, "model number: %s\n", battery->model_number);
seq_printf(seq, "serial number: %s\n", battery->serial_number);
seq_printf(seq, "battery type: %s\n", battery->type);
seq_printf(seq, "OEM info: %s\n", battery->oem_info);
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery info\n");
return result;
}
static int acpi_battery_print_state(struct seq_file *seq, int result)
{
struct acpi_battery *battery = seq->private;
if (result)
goto end;
seq_printf(seq, "present: %s\n",
acpi_battery_present(battery)?"yes":"no");
if (!acpi_battery_present(battery))
goto end;
seq_printf(seq, "capacity state: %s\n",
(battery->state & 0x04)?"critical":"ok");
if ((battery->state & 0x01) && (battery->state & 0x02))
seq_printf(seq,
"charging state: charging/discharging\n");
else if (battery->state & 0x01)
seq_printf(seq, "charging state: discharging\n");
else if (battery->state & 0x02)
seq_printf(seq, "charging state: charging\n");
else
seq_printf(seq, "charging state: charged\n");
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "present rate: unknown\n");
else
seq_printf(seq, "present rate: %d %s\n",
battery->rate_now, acpi_battery_units(battery));
if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "remaining capacity: unknown\n");
else
seq_printf(seq, "remaining capacity: %d %sh\n",
battery->capacity_now, acpi_battery_units(battery));
if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
seq_printf(seq, "present voltage: unknown\n");
else
seq_printf(seq, "present voltage: %d mV\n",
battery->voltage_now);
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery state\n");
return result;
}
static int acpi_battery_print_alarm(struct seq_file *seq, int result)
{
struct acpi_battery *battery = seq->private;
if (result)
goto end;
if (!acpi_battery_present(battery)) {
seq_printf(seq, "present: no\n");
goto end;
}
seq_printf(seq, "alarm: ");
if (!battery->alarm)
seq_printf(seq, "unsupported\n");
else
seq_printf(seq, "%u %sh\n", battery->alarm,
acpi_battery_units(battery));
end:
if (result)
seq_printf(seq, "ERROR: Unable to read battery alarm\n");
return result;
}
static ssize_t acpi_battery_write_alarm(struct file *file,
const char __user * buffer,
size_t count, loff_t * ppos)
{
int result = 0;
char alarm_string[12] = { '\0' };
struct seq_file *m = file->private_data;
struct acpi_battery *battery = m->private;
if (!battery || (count > sizeof(alarm_string) - 1))
return -EINVAL;
if (!acpi_battery_present(battery)) {
result = -ENODEV;
goto end;
}
if (copy_from_user(alarm_string, buffer, count)) {
result = -EFAULT;
goto end;
}
alarm_string[count] = '\0';
battery->alarm = simple_strtol(alarm_string, NULL, 0);
result = acpi_battery_set_alarm(battery);
end:
if (!result)
return count;
return result;
}
typedef int(*print_func)(struct seq_file *seq, int result);
static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
acpi_battery_print_info,
acpi_battery_print_state,
acpi_battery_print_alarm,
};
static int acpi_battery_read(int fid, struct seq_file *seq)
{
struct acpi_battery *battery = seq->private;
int result = acpi_battery_update(battery);
return acpi_print_funcs[fid](seq, result);
}
#define DECLARE_FILE_FUNCTIONS(_name) \
static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
{ \
return acpi_battery_read(_name##_tag, seq); \
} \
static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
{ \
return single_open(file, acpi_battery_read_##_name, PDE(inode)->data); \
}
DECLARE_FILE_FUNCTIONS(info);
DECLARE_FILE_FUNCTIONS(state);
DECLARE_FILE_FUNCTIONS(alarm);
#undef DECLARE_FILE_FUNCTIONS
#define FILE_DESCRIPTION_RO(_name) \
{ \
.name = __stringify(_name), \
.mode = S_IRUGO, \
.ops = { \
.open = acpi_battery_##_name##_open_fs, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
.owner = THIS_MODULE, \
}, \
}
#define FILE_DESCRIPTION_RW(_name) \
{ \
.name = __stringify(_name), \
.mode = S_IFREG | S_IRUGO | S_IWUSR, \
.ops = { \
.open = acpi_battery_##_name##_open_fs, \
.read = seq_read, \
.llseek = seq_lseek, \
.write = acpi_battery_write_##_name, \
.release = single_release, \
.owner = THIS_MODULE, \
}, \
}
static const struct battery_file {
struct file_operations ops;
mode_t mode;
const char *name;
} acpi_battery_file[] = {
FILE_DESCRIPTION_RO(info),
FILE_DESCRIPTION_RO(state),
FILE_DESCRIPTION_RW(alarm),
};
#undef FILE_DESCRIPTION_RO
#undef FILE_DESCRIPTION_RW
static int acpi_battery_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
int i;
printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
if (!acpi_device_dir(device)) {
acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
acpi_battery_dir);
if (!acpi_device_dir(device))
return -ENODEV;
}
for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
entry = proc_create_data(acpi_battery_file[i].name,
acpi_battery_file[i].mode,
acpi_device_dir(device),
&acpi_battery_file[i].ops,
acpi_driver_data(device));
if (!entry)
return -ENODEV;
}
return 0;
}
static void acpi_battery_remove_fs(struct acpi_device *device)
{
int i;
if (!acpi_device_dir(device))
return;
for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
remove_proc_entry(acpi_battery_file[i].name,
acpi_device_dir(device));
remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
acpi_device_dir(device) = NULL;
}
#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
struct acpi_battery *battery = acpi_driver_data(device);
struct device *old;
if (!battery)
return;
old = battery->bat.dev;
if (event == ACPI_BATTERY_NOTIFY_INFO)
acpi_battery_refresh(battery);
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
/* acpi_battery_update could remove power_supply object */
if (old && battery->bat.dev)
power_supply_changed(&battery->bat);
}
static int battery_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
switch (mode) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
if (battery->bat.dev) {
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
break;
}
return 0;
}
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
struct acpi_battery *battery = NULL;
acpi_handle handle;
if (!device)
return -EINVAL;
battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
if (!battery)
return -ENOMEM;
battery->device = device;
strcpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
mutex_init(&battery->sysfs_lock);
if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
"_BIX", &handle)))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
result = acpi_battery_update(battery);
if (result)
goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
goto fail;
}
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
device->status.battery_present ? "present" : "absent");
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
return result;
fail:
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return result;
}
static int acpi_battery_remove(struct acpi_device *device, int type)
{
struct acpi_battery *battery = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return 0;
}
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct acpi_device *device)
{
struct acpi_battery *battery;
if (!device)
return -EINVAL;
battery = acpi_driver_data(device);
battery->update_time = 0;
acpi_battery_update(battery);
return 0;
}
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
.resume = acpi_battery_resume,
.remove = acpi_battery_remove,
.notify = acpi_battery_notify,
},
};
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
{
if (acpi_disabled)
return;
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_dir = acpi_lock_battery_dir();
if (!acpi_battery_dir)
return;
#endif
if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_battery_dir(acpi_battery_dir);
#endif
return;
}
return;
}
static int __init acpi_battery_init(void)
{
async_schedule(acpi_battery_init_async, NULL);
return 0;
}
static void __exit acpi_battery_exit(void)
{
acpi_bus_unregister_driver(&acpi_battery_driver);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_battery_dir(acpi_battery_dir);
#endif
}
module_init(acpi_battery_init);
module_exit(acpi_battery_exit);
| gpl-2.0 |
rombaby/android_kernel_xiaomi_redmi2 | kernel/posix-cpu-timers.c | 2043 | 43324 | /*
* Implement CPU time clocks for the POSIX clock interface.
*/
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
/*
* Called after updating RLIMIT_CPU to run cpu timer and update
* tsk->signal->cputime_expires expiration cache if necessary. Needs
* siglock protection since other code may update expiration cache as
* well.
*/
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
cputime_t cputime = secs_to_cputime(rlim_new);
spin_lock_irq(&task->sighand->siglock);
set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&task->sighand->siglock);
}
static int check_clock(const clockid_t which_clock)
{
int error = 0;
struct task_struct *p;
const pid_t pid = CPUCLOCK_PID(which_clock);
if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
return -EINVAL;
if (pid == 0)
return 0;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
same_thread_group(p, current) : has_group_leader_pid(p))) {
error = -EINVAL;
}
rcu_read_unlock();
return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
union cpu_time_count cpu,
struct timespec *tp)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
*tp = ns_to_timespec(cpu.sched);
else
cputime_to_timespec(cpu.cpu, tp);
}
static inline int cpu_time_before(const clockid_t which_clock,
union cpu_time_count now,
union cpu_time_count then)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
return now.sched < then.sched;
} else {
return now.cpu < then.cpu;
}
}
static inline void cpu_time_add(const clockid_t which_clock,
union cpu_time_count *acc,
union cpu_time_count val)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
acc->sched += val.sched;
} else {
acc->cpu += val.cpu;
}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
union cpu_time_count a,
union cpu_time_count b)
{
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
a.sched -= b.sched;
} else {
a.cpu -= b.cpu;
}
return a;
}
/*
* Update expiry time from increment, and increase overrun count,
* given the current clock sample.
*/
static void bump_cpu_timer(struct k_itimer *timer,
union cpu_time_count now)
{
int i;
if (timer->it.cpu.incr.sched == 0)
return;
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
unsigned long long delta, incr;
if (now.sched < timer->it.cpu.expires.sched)
return;
incr = timer->it.cpu.incr.sched;
delta = now.sched + incr - timer->it.cpu.expires.sched;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr = incr << 1;
for (; i >= 0; incr >>= 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.expires.sched += incr;
timer->it_overrun += 1 << i;
delta -= incr;
}
} else {
cputime_t delta, incr;
if (now.cpu < timer->it.cpu.expires.cpu)
return;
incr = timer->it.cpu.incr.cpu;
delta = now.cpu + incr - timer->it.cpu.expires.cpu;
/* Don't use (incr*2 < delta), incr*2 might overflow. */
for (i = 0; incr < delta - incr; i++)
incr += incr;
for (; i >= 0; incr = incr >> 1, i--) {
if (delta < incr)
continue;
timer->it.cpu.expires.cpu += incr;
timer->it_overrun += 1 << i;
delta -= incr;
}
}
}
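/*
 * Worked example for the SCHED case above: with expires == 100,
 * incr == 10 and now == 135, delta becomes 135 + 10 - 100 = 45.
 * The first loop doubles incr to 40 (i == 2); the second loop then
 * adds 40 to expires (it_overrun += 1 << 2 == 4), leaving delta == 5,
 * and skips the smaller steps 20 and 10. The timer ends up with
 * expires == 140, the first expiry strictly after "now", computed in
 * O(log(overruns)) steps instead of one iteration per missed period.
 */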
/**
* task_cputime_zero - Check a task_cputime struct for all zero fields.
*
* @cputime: The struct to compare.
*
* Checks @cputime to see if all fields are zero. Returns true if all fields
* are zero, false if any field is nonzero.
*/
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
return 1;
return 0;
}
static inline cputime_t prof_ticks(struct task_struct *p)
{
cputime_t utime, stime;
task_cputime(p, &utime, &stime);
return utime + stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
cputime_t utime;
task_cputime(p, &utime, NULL);
return utime;
}
static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
int error = check_clock(which_clock);
if (!error) {
tp->tv_sec = 0;
tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
/*
* If sched_clock is using a cycle counter, its
* true resolution is not exported, but it is
* much finer than 1s/HZ.
*/
tp->tv_nsec = 1;
}
}
return error;
}
static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
* in the call before failing with EPERM.
*/
int error = check_clock(which_clock);
if (error == 0) {
error = -EPERM;
}
return error;
}
/*
* Sample a per-thread clock for the given task.
*/
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
union cpu_time_count *cpu)
{
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = prof_ticks(p);
break;
case CPUCLOCK_VIRT:
cpu->cpu = virt_ticks(p);
break;
case CPUCLOCK_SCHED:
cpu->sched = task_sched_runtime(p);
break;
}
return 0;
}
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
if (b->utime > a->utime)
a->utime = b->utime;
if (b->stime > a->stime)
a->stime = b->stime;
if (b->sum_exec_runtime > a->sum_exec_runtime)
a->sum_exec_runtime = b->sum_exec_runtime;
}
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
struct task_cputime sum;
unsigned long flags;
if (!cputimer->running) {
/*
* The POSIX timer interface allows for absolute time expiry
* values through the TIMER_ABSTIME flag, therefore we have
* to synchronize the timer to the clock every time we start
* it.
*/
thread_group_cputime(tsk, &sum);
raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 1;
update_gt_cputime(&cputimer->cputime, &sum);
} else
raw_spin_lock_irqsave(&cputimer->lock, flags);
*times = cputimer->cputime;
raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
/*
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
*/
static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
union cpu_time_count *cpu)
{
struct task_cputime cputime;
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
thread_group_cputime(p, &cputime);
cpu->cpu = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
thread_group_cputime(p, &cputime);
cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
thread_group_cputime(p, &cputime);
cpu->sched = cputime.sum_exec_runtime;
break;
}
return 0;
}
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int error = -EINVAL;
union cpu_time_count rtn;
if (pid == 0) {
/*
* Special case constant value for our own clocks.
* We don't have to do any lookup to find ourselves.
*/
if (CPUCLOCK_PERTHREAD(which_clock)) {
/*
* Sampling just ourselves we can do with no locking.
*/
error = cpu_clock_sample(which_clock,
current, &rtn);
} else {
read_lock(&tasklist_lock);
error = cpu_clock_sample_group(which_clock,
current, &rtn);
read_unlock(&tasklist_lock);
}
} else {
/*
* Find the given PID, and validate that the caller
* should be able to see it.
*/
struct task_struct *p;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p) {
if (CPUCLOCK_PERTHREAD(which_clock)) {
if (same_thread_group(p, current)) {
error = cpu_clock_sample(which_clock,
p, &rtn);
}
} else {
read_lock(&tasklist_lock);
if (thread_group_leader(p) && p->sighand) {
error =
cpu_clock_sample_group(which_clock,
p, &rtn);
}
read_unlock(&tasklist_lock);
}
}
rcu_read_unlock();
}
if (error)
return error;
sample_to_timespec(which_clock, rtn, tp);
return 0;
}
/*
* Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
* This is called from sys_timer_create() and do_cpu_nanosleep() with the
* new timer already all-zeros initialized.
*/
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
int ret = 0;
const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
struct task_struct *p;
if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
return -EINVAL;
INIT_LIST_HEAD(&new_timer->it.cpu.entry);
rcu_read_lock();
if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
if (pid == 0) {
p = current;
} else {
p = find_task_by_vpid(pid);
if (p && !same_thread_group(p, current))
p = NULL;
}
} else {
if (pid == 0) {
p = current->group_leader;
} else {
p = find_task_by_vpid(pid);
if (p && !has_group_leader_pid(p))
p = NULL;
}
}
new_timer->it.cpu.task = p;
if (p) {
get_task_struct(p);
} else {
ret = -EINVAL;
}
rcu_read_unlock();
return ret;
}
/*
* Clean up a CPU-clock timer that is about to be destroyed.
* This is called from timer deletion with the timer already locked.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_del(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
int ret = 0;
if (likely(p != NULL)) {
read_lock(&tasklist_lock);
if (unlikely(p->sighand == NULL)) {
/*
* We raced with the reaping of the task.
* The deletion should have cleared us off the list.
*/
BUG_ON(!list_empty(&timer->it.cpu.entry));
} else {
spin_lock(&p->sighand->siglock);
if (timer->it.cpu.firing)
ret = TIMER_RETRY;
else
list_del(&timer->it.cpu.entry);
spin_unlock(&p->sighand->siglock);
}
read_unlock(&tasklist_lock);
if (!ret)
put_task_struct(p);
}
return ret;
}
/*
* Clean out CPU timers still ticking when a thread exited. The task
* pointer is cleared, and the expiry time is replaced with the residual
* time for later timer_gettime calls to return.
* This must be called with the siglock held.
*/
static void cleanup_timers(struct list_head *head,
cputime_t utime, cputime_t stime,
unsigned long long sum_exec_runtime)
{
struct cpu_timer_list *timer, *next;
cputime_t ptime = utime + stime;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (timer->expires.cpu < ptime) {
timer->expires.cpu = 0;
} else {
timer->expires.cpu -= ptime;
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (timer->expires.cpu < utime) {
timer->expires.cpu = 0;
} else {
timer->expires.cpu -= utime;
}
}
++head;
list_for_each_entry_safe(timer, next, head, entry) {
list_del_init(&timer->entry);
if (timer->expires.sched < sum_exec_runtime) {
timer->expires.sched = 0;
} else {
timer->expires.sched -= sum_exec_runtime;
}
}
}
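/*
 * Note: "head" above points at a 3-element array of list_heads
 * (tsk->cpu_timers[] or signal->cpu_timers[]), indexed by clock type
 * in the order CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED. That is
 * why the function steps through the lists with "++head" and compares
 * against ptime, utime and sum_exec_runtime respectively.
 */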
/*
* These are both called with the siglock held, when the current thread
* is being reaped. When the final (leader) thread in the group is reaped,
* posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
cputime_t utime, stime;
add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
sizeof(unsigned long long));
task_cputime(tsk, &utime, &stime);
cleanup_timers(tsk->cpu_timers,
utime, stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
struct signal_struct *const sig = tsk->signal;
cputime_t utime, stime;
task_cputime(tsk, &utime, &stime);
cleanup_timers(tsk->signal->cpu_timers,
utime + sig->utime, stime + sig->stime,
tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
/*
* That's all for this thread or process.
* We leave our residual in expires to be reported.
*/
put_task_struct(timer->it.cpu.task);
timer->it.cpu.task = NULL;
timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
timer->it.cpu.expires,
now);
}
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
return expires == 0 || expires > new_exp;
}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
* for reading, interrupts disabled and p->sighand->siglock taken.
*/
static void arm_timer(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
struct list_head *head, *listpos;
struct task_cputime *cputime_expires;
struct cpu_timer_list *const nt = &timer->it.cpu;
struct cpu_timer_list *next;
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
head = p->cpu_timers;
cputime_expires = &p->cputime_expires;
} else {
head = p->signal->cpu_timers;
cputime_expires = &p->signal->cputime_expires;
}
head += CPUCLOCK_WHICH(timer->it_clock);
listpos = head;
list_for_each_entry(next, head, entry) {
if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
break;
listpos = &next->entry;
}
list_add(&nt->entry, listpos);
if (listpos == head) {
union cpu_time_count *exp = &nt->expires;
/*
* We are the new earliest-expiring POSIX 1.b timer, hence
* need to update expiration cache. Take into account that
* for process timers we share expiration cache with itimers
* and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
*/
switch (CPUCLOCK_WHICH(timer->it_clock)) {
case CPUCLOCK_PROF:
if (expires_gt(cputime_expires->prof_exp, exp->cpu))
cputime_expires->prof_exp = exp->cpu;
break;
case CPUCLOCK_VIRT:
if (expires_gt(cputime_expires->virt_exp, exp->cpu))
cputime_expires->virt_exp = exp->cpu;
break;
case CPUCLOCK_SCHED:
if (cputime_expires->sched_exp == 0 ||
cputime_expires->sched_exp > exp->sched)
cputime_expires->sched_exp = exp->sched;
break;
}
}
}
/*
* The timer is locked, fire it and arrange for its reload.
*/
static void cpu_timer_fire(struct k_itimer *timer)
{
if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
/*
* The user doesn't want any signal.
*/
timer->it.cpu.expires.sched = 0;
} else if (unlikely(timer->sigq == NULL)) {
/*
* This is a special case for clock_nanosleep,
* not a normal timer from sys_timer_create.
*/
wake_up_process(timer->it_process);
timer->it.cpu.expires.sched = 0;
} else if (timer->it.cpu.incr.sched == 0) {
/*
* One-shot timer. Clear it as soon as it's fired.
*/
posix_timer_event(timer, 0);
timer->it.cpu.expires.sched = 0;
} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
/*
* The signal did not get queued because the signal
* was ignored, so we won't get any callback to
* reload the timer. But we need to keep it
* ticking in case the signal is deliverable next time.
*/
posix_cpu_timer_schedule(timer);
}
}
/*
* Sample a process (thread group) timer for the given group_leader task.
* Must be called with tasklist_lock held for reading.
*/
static int cpu_timer_sample_group(const clockid_t which_clock,
struct task_struct *p,
union cpu_time_count *cpu)
{
struct task_cputime cputime;
thread_group_cputimer(p, &cputime);
switch (CPUCLOCK_WHICH(which_clock)) {
default:
return -EINVAL;
case CPUCLOCK_PROF:
cpu->cpu = cputime.utime + cputime.stime;
break;
case CPUCLOCK_VIRT:
cpu->cpu = cputime.utime;
break;
case CPUCLOCK_SCHED:
cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
break;
}
return 0;
}
#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
tick_nohz_full_kick_all();
}
static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
/*
* We need the IPIs to be sent from sane process context.
* The posix cpu timers are always set with irqs disabled.
*/
static void posix_cpu_timer_kick_nohz(void)
{
schedule_work(&nohz_kick_work);
}
bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
if (!task_cputime_zero(&tsk->cputime_expires))
return false;
if (tsk->signal->cputimer.running)
return false;
return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif
/*
* Guts of sys_timer_settime for CPU timers.
* This is called with the timer locked and interrupts disabled.
* If we return TIMER_RETRY, it's necessary to release the timer's lock
* and try again. (This happens when the timer is in the middle of firing.)
*/
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
struct itimerspec *new, struct itimerspec *old)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count old_expires, new_expires, old_incr, val;
int ret;
if (unlikely(p == NULL)) {
/*
* Timer refers to a dead task's clock.
*/
return -ESRCH;
}
new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
read_lock(&tasklist_lock);
/*
* We need the tasklist_lock to protect against reaping that
* clears p->sighand. If p has just been reaped, we can no
* longer get any information about it at all.
*/
if (unlikely(p->sighand == NULL)) {
read_unlock(&tasklist_lock);
put_task_struct(p);
timer->it.cpu.task = NULL;
return -ESRCH;
}
/*
* Disarm any old timer after extracting its expiry time.
*/
BUG_ON(!irqs_disabled());
ret = 0;
old_incr = timer->it.cpu.incr;
spin_lock(&p->sighand->siglock);
old_expires = timer->it.cpu.expires;
if (unlikely(timer->it.cpu.firing)) {
timer->it.cpu.firing = -1;
ret = TIMER_RETRY;
} else
list_del_init(&timer->it.cpu.entry);
/*
* We need to sample the current value to convert the new
* value from relative to absolute, and to convert the
* old value from absolute to relative. To set a process
* timer, we need a sample to balance the thread expiry
* times (in arm_timer). With an absolute time, we must
* check if it's already passed. In short, we need a sample.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &val);
} else {
cpu_timer_sample_group(timer->it_clock, p, &val);
}
if (old) {
if (old_expires.sched == 0) {
old->it_value.tv_sec = 0;
old->it_value.tv_nsec = 0;
} else {
/*
* Update the timer in case it has
* overrun already. If it has,
* we'll report it as having overrun
* and with the next reloaded timer
* already ticking, though we are
* swallowing that pending
* notification here to install the
* new setting.
*/
bump_cpu_timer(timer, val);
if (cpu_time_before(timer->it_clock, val,
timer->it.cpu.expires)) {
old_expires = cpu_time_sub(
timer->it_clock,
timer->it.cpu.expires, val);
sample_to_timespec(timer->it_clock,
old_expires,
&old->it_value);
} else {
old->it_value.tv_nsec = 1;
old->it_value.tv_sec = 0;
}
}
}
if (unlikely(ret)) {
/*
* We are colliding with the timer actually firing.
* Punt after filling in the timer's old value, and
* disable this firing since we are already reporting
* it as an overrun (thanks to bump_cpu_timer above).
*/
spin_unlock(&p->sighand->siglock);
read_unlock(&tasklist_lock);
goto out;
}
if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
cpu_time_add(timer->it_clock, &new_expires, val);
}
/*
* Install the new expiry time (or zero).
* For a timer with no notification action, we don't actually
* arm the timer (we'll just fake it for timer_gettime).
*/
timer->it.cpu.expires = new_expires;
if (new_expires.sched != 0 &&
cpu_time_before(timer->it_clock, val, new_expires)) {
arm_timer(timer);
}
spin_unlock(&p->sighand->siglock);
read_unlock(&tasklist_lock);
/*
* Install the new reload setting, and
* set up the signal and overrun bookkeeping.
*/
timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
&new->it_interval);
/*
* This acts as a modification timestamp for the timer,
* so any automatic reload attempt will punt on seeing
* that we have reset the timer manually.
*/
timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timer->it_overrun_last = 0;
timer->it_overrun = -1;
if (new_expires.sched != 0 &&
!cpu_time_before(timer->it_clock, val, new_expires)) {
/*
* The designated time already passed, so we notify
* immediately, even if the thread never runs to
* accumulate more time on this clock.
*/
cpu_timer_fire(timer);
}
ret = 0;
out:
if (old) {
sample_to_timespec(timer->it_clock,
old_incr, &old->it_interval);
}
if (!ret)
posix_cpu_timer_kick_nohz();
return ret;
}
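/*
 * Usage sketch (illustrative, not from this file): for timers created on a
 * CPU-time clock, timer_settime(2) is what lands in posix_cpu_timer_set()
 * above via clock_posix_cpu.timer_set.  Error handling is trimmed; link
 * with -lrt on older glibc.
 */
#include <signal.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo = SIGALRM };
	struct itimerspec its = { .it_value = { .tv_sec = 2 } };
	timer_t tid;

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid))
		return 1;
	/* Relative: 2s of process CPU time.  TIMER_ABSTIME would be absolute. */
	if (timer_settime(tid, 0, &its, NULL))
		return 1;
	for (;;)
		;	/* burn CPU until the default SIGALRM action kills us */
}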
static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
union cpu_time_count now;
struct task_struct *p = timer->it.cpu.task;
int clear_dead;
/*
* Easy part: convert the reload time.
*/
sample_to_timespec(timer->it_clock,
timer->it.cpu.incr, &itp->it_interval);
if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
return;
}
if (unlikely(p == NULL)) {
/*
* This task already died and the timer will never fire.
* In this case, expires is actually the dead value.
*/
dead:
sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
&itp->it_value);
return;
}
/*
* Sample the clock to take the difference with the expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
clear_dead = p->exit_state;
} else {
read_lock(&tasklist_lock);
if (unlikely(p->sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
* Call the timer disarmed, nothing else to do.
*/
put_task_struct(p);
timer->it.cpu.task = NULL;
timer->it.cpu.expires.sched = 0;
read_unlock(&tasklist_lock);
goto dead;
} else {
cpu_timer_sample_group(timer->it_clock, p, &now);
clear_dead = (unlikely(p->exit_state) &&
thread_group_empty(p));
}
read_unlock(&tasklist_lock);
}
if (unlikely(clear_dead)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto dead;
}
if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
sample_to_timespec(timer->it_clock,
cpu_time_sub(timer->it_clock,
timer->it.cpu.expires, now),
&itp->it_value);
} else {
/*
* The timer should have expired already, but the firing
* hasn't taken place yet. Say it's just about to expire.
*/
itp->it_value.tv_nsec = 1;
itp->it_value.tv_sec = 0;
}
}
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
* tsk->it_*_expires values to reflect the remaining thread CPU timers.
*/
static void check_thread_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct list_head *timers = tsk->cpu_timers;
struct signal_struct *const sig = tsk->signal;
unsigned long soft;
maxfire = 20;
tsk->cputime_expires.prof_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.prof_exp = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->cputime_expires.virt_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
tsk->cputime_expires.virt_exp = t->expires.cpu;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
++timers;
maxfire = 20;
tsk->cputime_expires.sched_exp = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *t = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
tsk->cputime_expires.sched_exp = t->expires.sched;
break;
}
t->firing = 1;
list_move_tail(&t->entry, firing);
}
/*
* Check for the special case thread timers.
*/
soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
if (soft != RLIM_INFINITY) {
unsigned long hard =
ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
if (hard != RLIM_INFINITY &&
tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
if (soft < hard) {
soft += USEC_PER_SEC;
sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
}
printk(KERN_INFO
"RT Watchdog Timeout: %s[%d]\n",
tsk->comm, task_pid_nr(tsk));
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
}
}
}
static void stop_process_timers(struct signal_struct *sig)
{
struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
raw_spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
static u32 onecputick;
static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
cputime_t *expires, cputime_t cur_time, int signo)
{
if (!it->expires)
return;
if (cur_time >= it->expires) {
if (it->incr) {
it->expires += it->incr;
it->error += it->incr_error;
if (it->error >= onecputick) {
it->expires -= cputime_one_jiffy;
it->error -= onecputick;
}
} else {
it->expires = 0;
}
trace_itimer_expire(signo == SIGPROF ?
ITIMER_PROF : ITIMER_VIRTUAL,
tsk->signal->leader_pid, cur_time);
__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
}
if (it->expires && (!*expires || it->expires < *expires)) {
*expires = it->expires;
}
}
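/*
 * Usage sketch (illustrative): the ITIMER_PROF/ITIMER_VIRTUAL interval
 * timers that check_cpu_itimer() advances above are armed from userspace
 * with setitimer(2).  Minimal example; the 100ms period is arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile sig_atomic_t hits;

static void on_prof(int sig)
{
	(void)sig;
	hits++;
}

int main(void)
{
	struct itimerval itv = {
		.it_interval = { .tv_usec = 100000 },
		.it_value    = { .tv_usec = 100000 },
	};

	signal(SIGPROF, on_prof);
	setitimer(ITIMER_PROF, &itv, NULL);	/* counts process CPU time */
	while (hits < 10)
		;				/* spin so CPU time accrues */
	printf("received %d SIGPROF ticks\n", (int)hits);
	return 0;
}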
/*
* Check for any per-process CPU timers that have fired and move them
* off the tsk->signal->cpu_timers lists onto the firing list. Per-thread timers
* have already been taken off.
*/
static void check_process_timers(struct task_struct *tsk,
struct list_head *firing)
{
int maxfire;
struct signal_struct *const sig = tsk->signal;
cputime_t utime, ptime, virt_expires, prof_expires;
unsigned long long sum_sched_runtime, sched_expires;
struct list_head *timers = sig->cpu_timers;
struct task_cputime cputime;
unsigned long soft;
/*
* Collect the current process totals.
*/
thread_group_cputimer(tsk, &cputime);
utime = cputime.utime;
ptime = utime + cputime.stime;
sum_sched_runtime = cputime.sum_exec_runtime;
maxfire = 20;
prof_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || ptime < tl->expires.cpu) {
prof_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
virt_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || utime < tl->expires.cpu) {
virt_expires = tl->expires.cpu;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
++timers;
maxfire = 20;
sched_expires = 0;
while (!list_empty(timers)) {
struct cpu_timer_list *tl = list_first_entry(timers,
struct cpu_timer_list,
entry);
if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
sched_expires = tl->expires.sched;
break;
}
tl->firing = 1;
list_move_tail(&tl->entry, firing);
}
/*
* Check for the special case process timers.
*/
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
SIGPROF);
check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
SIGVTALRM);
soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (soft != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
unsigned long hard =
ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
cputime_t x;
if (psecs >= hard) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
*/
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
if (psecs >= soft) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
if (soft < hard) {
soft++;
sig->rlim[RLIMIT_CPU].rlim_cur = soft;
}
}
x = secs_to_cputime(soft);
if (!prof_expires || x < prof_expires) {
prof_expires = x;
}
}
sig->cputime_expires.prof_exp = prof_expires;
sig->cputime_expires.virt_exp = virt_expires;
sig->cputime_expires.sched_exp = sched_expires;
if (task_cputime_zero(&sig->cputime_expires))
stop_process_timers(sig);
}
/*
* This is called from the signal code (via do_schedule_next_timer)
* when the last timer signal was delivered and we have to reload the timer.
*/
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
struct task_struct *p = timer->it.cpu.task;
union cpu_time_count now;
if (unlikely(p == NULL))
/*
* The task was cleaned up already, no future firings.
*/
goto out;
/*
* Fetch the current sample and update the timer's expiry time.
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
cpu_clock_sample(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
if (unlikely(p->exit_state)) {
clear_dead_task(timer, now);
goto out;
}
read_lock(&tasklist_lock); /* arm_timer needs it. */
spin_lock(&p->sighand->siglock);
} else {
read_lock(&tasklist_lock);
if (unlikely(p->sighand == NULL)) {
/*
* The process has been reaped.
* We can't even collect a sample any more.
*/
put_task_struct(p);
timer->it.cpu.task = p = NULL;
timer->it.cpu.expires.sched = 0;
goto out_unlock;
} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
/*
* We've noticed that the thread is dead, but
* not yet reaped. Take this opportunity to
* drop our task ref.
*/
clear_dead_task(timer, now);
goto out_unlock;
}
spin_lock(&p->sighand->siglock);
cpu_timer_sample_group(timer->it_clock, p, &now);
bump_cpu_timer(timer, now);
/* Leave the tasklist_lock locked for the call below. */
}
/*
* Now re-arm for the new expiry time.
*/
BUG_ON(!irqs_disabled());
arm_timer(timer);
spin_unlock(&p->sighand->siglock);
out_unlock:
read_unlock(&tasklist_lock);
out:
timer->it_overrun_last = timer->it_overrun;
timer->it_overrun = -1;
++timer->it_requeue_pending;
}
/**
* task_cputime_expired - Compare two task_cputime entities.
*
* @sample: The task_cputime structure to be checked for expiration.
* @expires: Expiration times, against which @sample will be checked.
*
* Checks @sample against @expires to see if any field of @sample has expired.
* Returns true if any field of the former is greater than or equal to the
* corresponding field of the latter, provided the latter field is set. Otherwise returns false.
*/
static inline int task_cputime_expired(const struct task_cputime *sample,
const struct task_cputime *expires)
{
if (expires->utime && sample->utime >= expires->utime)
return 1;
if (expires->stime && sample->utime + sample->stime >= expires->stime)
return 1;
if (expires->sum_exec_runtime != 0 &&
sample->sum_exec_runtime >= expires->sum_exec_runtime)
return 1;
return 0;
}
/**
* fastpath_timer_check - POSIX CPU timers fast path.
*
* @tsk: The task (thread) being checked.
*
* Check the task and thread group timers. If both are zero (there are no
* timers set) return false. Otherwise snapshot the task and thread group
* timers and compare them with the corresponding expiration times. Return
* true if a timer has expired, else return false.
*/
static inline int fastpath_timer_check(struct task_struct *tsk)
{
struct signal_struct *sig;
cputime_t utime, stime;
task_cputime(tsk, &utime, &stime);
if (!task_cputime_zero(&tsk->cputime_expires)) {
struct task_cputime task_sample = {
.utime = utime,
.stime = stime,
.sum_exec_runtime = tsk->se.sum_exec_runtime
};
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
}
sig = tsk->signal;
if (sig->cputimer.running) {
struct task_cputime group_sample;
raw_spin_lock(&sig->cputimer.lock);
group_sample = sig->cputimer.cputime;
raw_spin_unlock(&sig->cputimer.lock);
if (task_cputime_expired(&group_sample, &sig->cputime_expires))
return 1;
}
return 0;
}
/*
* This is called from the timer interrupt handler. The irq handler has
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
void run_posix_cpu_timers(struct task_struct *tsk)
{
LIST_HEAD(firing);
struct k_itimer *timer, *next;
unsigned long flags;
BUG_ON(!irqs_disabled());
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
*/
if (!fastpath_timer_check(tsk))
return;
if (!lock_task_sighand(tsk, &flags))
return;
/*
* Here we take off tsk->signal->cpu_timers[N] and
* tsk->cpu_timers[N] all the timers that are firing, and
* put them on the firing list.
*/
check_thread_timers(tsk, &firing);
/*
* If there are any active process wide timers (POSIX 1.b, itimers,
* RLIMIT_CPU) cputimer must be running.
*/
if (tsk->signal->cputimer.running)
check_process_timers(tsk, &firing);
/*
* We must release these locks before taking any timer's lock.
* There is a potential race with timer deletion here, as the
* siglock now protects our private firing list. We have set
* the firing flag in each timer, so that a deletion attempt
* that gets the timer lock before we do will give it up and
* spin until we've taken care of that timer below.
*/
unlock_task_sighand(tsk, &flags);
/*
* Now that all the timers on our list have the firing flag,
* no one will touch their list entries but us. We'll take
* each timer's lock before clearing its firing flag, so no
* timer call will interfere.
*/
list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
int cpu_firing;
spin_lock(&timer->it_lock);
list_del_init(&timer->it.cpu.entry);
cpu_firing = timer->it.cpu.firing;
timer->it.cpu.firing = 0;
/*
* The firing flag is -1 if we collided with a reset
* of the timer, which already reported this
* almost-firing as an overrun. So don't generate an event.
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
spin_unlock(&timer->it_lock);
}
/*
* In case some timers were rescheduled after the queue got emptied,
* wake up full dynticks CPUs.
*/
if (tsk->signal->cputimer.running)
posix_cpu_timer_kick_nohz();
}
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
*/
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
union cpu_time_count now;
BUG_ON(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);
if (oldval) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative; the *newval argument is relative and we update
* it to be absolute.
*/
if (*oldval) {
if (*oldval <= now.cpu) {
/* Just about to fire. */
*oldval = cputime_one_jiffy;
} else {
*oldval -= now.cpu;
}
}
if (!*newval)
goto out;
*newval += now.cpu;
}
/*
* Update the expiration cache if we are the earliest timer, or if the
* RLIMIT_CPU limit expires earlier than the prof_exp cpu timer.
*/
switch (clock_idx) {
case CPUCLOCK_PROF:
if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
tsk->signal->cputime_expires.prof_exp = *newval;
break;
case CPUCLOCK_VIRT:
if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
tsk->signal->cputime_expires.virt_exp = *newval;
break;
}
out:
posix_cpu_timer_kick_nohz();
}
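/*
 * Usage sketch (illustrative): RLIMIT_CPU and the process-wide itimers are
 * the "special case" timers routed through set_process_cpu_timer() above.
 * From userspace, RLIMIT_CPU is set with setrlimit(2); the kernel sends
 * SIGXCPU at the soft limit and SIGKILL at the hard limit, as seen in
 * check_process_timers().
 */
#include <signal.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_xcpu(int sig)
{
	(void)sig;
	write(1, "soft RLIMIT_CPU hit\n", 20);
}

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };	/* seconds */

	signal(SIGXCPU, on_xcpu);
	if (setrlimit(RLIMIT_CPU, &rl))
		return 1;
	for (;;)
		;	/* burn CPU: SIGXCPU after ~1s, SIGKILL after ~2s */
}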
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct itimerspec *it)
{
struct k_itimer timer;
int error;
/*
* Set up a temporary timer and then wait for it to go off.
*/
memset(&timer, 0, sizeof timer);
spin_lock_init(&timer.it_lock);
timer.it_clock = which_clock;
timer.it_overrun = -1;
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
if (!error) {
static struct itimerspec zero_it;
memset(it, 0, sizeof *it);
it->it_value = *rqtp;
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_set(&timer, flags, it, NULL);
if (error) {
spin_unlock_irq(&timer.it_lock);
return error;
}
while (!signal_pending(current)) {
if (timer.it.cpu.expires.sched == 0) {
/*
* Our timer fired and was reset, below
* deletion can not fail.
*/
posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
return 0;
}
/*
* Block until cpu_timer_fire (or a signal) wakes us.
*/
__set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&timer.it_lock);
schedule();
spin_lock_irq(&timer.it_lock);
}
/*
* We were interrupted by a signal.
*/
sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
if (!error) {
/*
* Timer is now unarmed, deletion can not fail.
*/
posix_cpu_timer_del(&timer);
}
spin_unlock_irq(&timer.it_lock);
while (error == TIMER_RETRY) {
/*
* We need to handle the case where the timer was or is in the
* middle of firing. In other cases we already freed
* resources.
*/
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
}
if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
/*
* It actually did fire already.
*/
return 0;
}
error = -ERESTART_RESTARTBLOCK;
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp, struct timespec __user *rmtp)
{
struct restart_block *restart_block =
&current_thread_info()->restart_block;
struct itimerspec it;
int error;
/*
* Diagnose required errors first.
*/
if (CPUCLOCK_PERTHREAD(which_clock) &&
(CPUCLOCK_PID(which_clock) == 0 ||
CPUCLOCK_PID(which_clock) == current->pid))
return -EINVAL;
error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
if (error == -ERESTART_RESTARTBLOCK) {
if (flags & TIMER_ABSTIME)
return -ERESTARTNOHAND;
/*
* Report back to the user the time still remaining.
*/
if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->fn = posix_cpu_nsleep_restart;
restart_block->nanosleep.clockid = which_clock;
restart_block->nanosleep.rmtp = rmtp;
restart_block->nanosleep.expires = timespec_to_ns(rqtp);
}
return error;
}
static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->nanosleep.clockid;
struct timespec t;
struct itimerspec it;
int error;
t = ns_to_timespec(restart_block->nanosleep.expires);
error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
if (error == -ERESTART_RESTARTBLOCK) {
struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
/*
* Report back to the user the time still remaining.
*/
if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
restart_block->nanosleep.expires = timespec_to_ns(&t);
}
return error;
}
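/*
 * Usage sketch (illustrative): do_cpu_nanosleep()/posix_cpu_nsleep() above
 * back clock_nanosleep(2) on the CPU-time clocks.  A sleep on
 * CLOCK_PROCESS_CPUTIME_ID only completes while the process burns CPU, so
 * the sketch spawns a spinning thread.  Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static void *burn(void *arg)
{
	(void)arg;
	for (;;)
		;		/* consume CPU so the process clock advances */
}

int main(void)
{
	struct timespec t = { .tv_sec = 1 };
	pthread_t tid;

	pthread_create(&tid, NULL, burn, NULL);
	/* One second of *process CPU time*, not of wall-clock time. */
	if (clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &t, NULL))
		perror("clock_nanosleep");
	return 0;
}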
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
struct timespec *rqtp,
struct timespec __user *rmtp)
{
return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
struct timespec *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
struct k_clock clock_posix_cpu = {
.clock_getres = posix_cpu_clock_getres,
.clock_set = posix_cpu_clock_set,
.clock_get = posix_cpu_clock_get,
.timer_create = posix_cpu_timer_create,
.nsleep = posix_cpu_nsleep,
.nsleep_restart = posix_cpu_nsleep_restart,
.timer_set = posix_cpu_timer_set,
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
};
static __init int init_posix_cpu_timers(void)
{
struct k_clock process = {
.clock_getres = process_cpu_clock_getres,
.clock_get = process_cpu_clock_get,
.timer_create = process_cpu_timer_create,
.nsleep = process_cpu_nsleep,
.nsleep_restart = process_cpu_nsleep_restart,
};
struct k_clock thread = {
.clock_getres = thread_cpu_clock_getres,
.clock_get = thread_cpu_clock_get,
.timer_create = thread_cpu_timer_create,
};
struct timespec ts;
posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
cputime_to_timespec(cputime_one_jiffy, &ts);
onecputick = ts.tv_nsec;
WARN_ON(ts.tv_sec != 0);
return 0;
}
__initcall(init_posix_cpu_timers);
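/*
 * Usage sketch (illustrative): the two clocks registered by
 * init_posix_cpu_timers() above are visible to userspace as
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID and can be read
 * with clock_gettime(2)/clock_getres(2).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res, now;

	clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res);
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
	printf("resolution %ld ns, thread CPU time so far %ld.%09ld s\n",
	       res.tv_nsec, (long)now.tv_sec, now.tv_nsec);
	return 0;
}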
| gpl-2.0 |
ztemt/NX510J_5.1_kernel | drivers/gpio/gpio-bt8xx.c | 2299 | 8173 | /*
bt8xx GPIO abuser
Copyright (C) 2008 Michael Buesch <m@bues.ch>
Please do _only_ contact the people listed _above_ with issues related to this driver.
All the other people listed below are not related to this driver. Their names
are only here, because this driver is derived from the bt848 driver.
Derived from the bt848 driver:
Copyright (C) 1996,97,98 Ralph Metzler
& Marcus Metzler
(c) 1999-2002 Gerd Knorr
some v4l2 code lines are taken from Justin's bttv2 driver which is
(c) 2000 Justin Schoeman
V4L1 removal from:
(c) 2005-2006 Nickolay V. Shmyrev
Fixes to be fully V4L2 compliant by
(c) 2006 Mauro Carvalho Chehab
Cropping and overscan support
Copyright (C) 2005, 2006 Michael H. Schimek
Sponsored by OPQ Systems AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/slab.h>
/* Steal the hardware definitions from the bttv driver. */
#include "../media/pci/bt8xx/bt848.h"
#define BT8XXGPIO_NR_GPIOS 24 /* We have 24 GPIO pins */
struct bt8xxgpio {
spinlock_t lock;
void __iomem *mmio;
struct pci_dev *pdev;
struct gpio_chip gpio;
#ifdef CONFIG_PM
u32 saved_outen;
u32 saved_data;
#endif
};
#define bgwrite(dat, adr) writel((dat), bg->mmio+(adr))
#define bgread(adr) readl(bg->mmio+(adr))
static int modparam_gpiobase = -1/* dynamic */;
module_param_named(gpiobase, modparam_gpiobase, int, 0444);
MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the default.");
static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
unsigned long flags;
u32 outen, data;
spin_lock_irqsave(&bg->lock, flags);
data = bgread(BT848_GPIO_DATA);
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
outen = bgread(BT848_GPIO_OUT_EN);
outen &= ~(1 << nr);
bgwrite(outen, BT848_GPIO_OUT_EN);
spin_unlock_irqrestore(&bg->lock, flags);
return 0;
}
static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
unsigned long flags;
u32 val;
spin_lock_irqsave(&bg->lock, flags);
val = bgread(BT848_GPIO_DATA);
spin_unlock_irqrestore(&bg->lock, flags);
return !!(val & (1 << nr));
}
static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
unsigned long flags;
u32 outen, data;
spin_lock_irqsave(&bg->lock, flags);
outen = bgread(BT848_GPIO_OUT_EN);
outen |= (1 << nr);
bgwrite(outen, BT848_GPIO_OUT_EN);
data = bgread(BT848_GPIO_DATA);
if (val)
data |= (1 << nr);
else
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
spin_unlock_irqrestore(&bg->lock, flags);
return 0;
}
static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
unsigned nr, int val)
{
struct bt8xxgpio *bg = container_of(gpio, struct bt8xxgpio, gpio);
unsigned long flags;
u32 data;
spin_lock_irqsave(&bg->lock, flags);
data = bgread(BT848_GPIO_DATA);
if (val)
data |= (1 << nr);
else
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
spin_unlock_irqrestore(&bg->lock, flags);
}
static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
{
struct gpio_chip *c = &bg->gpio;
c->label = dev_name(&bg->pdev->dev);
c->owner = THIS_MODULE;
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
c->set = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
c->can_sleep = 0;
}
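/*
 * Usage sketch (illustrative): once gpiochip_add() succeeds in the probe
 * routine below, the 24 pins show up behind the legacy sysfs GPIO
 * interface.  The base number is dynamic unless the "gpiobase" module
 * parameter is set, so the pin number 248 used here is only an example.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/gpio/export", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "248", 3);	/* hypothetical: chip base + pin 0 */
	close(fd);
	/* .../gpio248/direction and .../gpio248/value are now usable. */
	return 0;
}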
static int bt8xxgpio_probe(struct pci_dev *dev,
const struct pci_device_id *pci_id)
{
struct bt8xxgpio *bg;
int err;
bg = kzalloc(sizeof(*bg), GFP_KERNEL);
if (!bg)
return -ENOMEM;
bg->pdev = dev;
spin_lock_init(&bg->lock);
err = pci_enable_device(dev);
if (err) {
printk(KERN_ERR "bt8xxgpio: Can't enable device.\n");
goto err_freebg;
}
if (!request_mem_region(pci_resource_start(dev, 0),
pci_resource_len(dev, 0),
"bt8xxgpio")) {
printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n",
(unsigned long long)pci_resource_start(dev, 0));
err = -EBUSY;
goto err_disable;
}
pci_set_master(dev);
pci_set_drvdata(dev, bg);
bg->mmio = ioremap(pci_resource_start(dev, 0), 0x1000);
if (!bg->mmio) {
printk(KERN_ERR "bt8xxgpio: ioremap() failed\n");
err = -EIO;
goto err_release_mem;
}
/* Disable interrupts */
bgwrite(0, BT848_INT_MASK);
/* gpio init */
bgwrite(0, BT848_GPIO_DMA_CTL);
bgwrite(0, BT848_GPIO_REG_INP);
bgwrite(0, BT848_GPIO_OUT_EN);
bt8xxgpio_gpio_setup(bg);
err = gpiochip_add(&bg->gpio);
if (err) {
printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n");
goto err_release_mem;
}
return 0;
err_release_mem:
release_mem_region(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
pci_set_drvdata(dev, NULL);
err_disable:
pci_disable_device(dev);
err_freebg:
kfree(bg);
return err;
}
static void bt8xxgpio_remove(struct pci_dev *pdev)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
gpiochip_remove(&bg->gpio);
bgwrite(0, BT848_INT_MASK);
bgwrite(~0x0, BT848_INT_STAT);
bgwrite(0x0, BT848_GPIO_OUT_EN);
iounmap(bg->mmio);
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
kfree(bg);
}
#ifdef CONFIG_PM
static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
unsigned long flags;
spin_lock_irqsave(&bg->lock, flags);
bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
bg->saved_data = bgread(BT848_GPIO_DATA);
bgwrite(0, BT848_INT_MASK);
bgwrite(~0x0, BT848_INT_STAT);
bgwrite(0x0, BT848_GPIO_OUT_EN);
spin_unlock_irqrestore(&bg->lock, flags);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int bt8xxgpio_resume(struct pci_dev *pdev)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
unsigned long flags;
int err;
pci_set_power_state(pdev, 0);
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
spin_lock_irqsave(&bg->lock, flags);
bgwrite(0, BT848_INT_MASK);
bgwrite(0, BT848_GPIO_DMA_CTL);
bgwrite(0, BT848_GPIO_REG_INP);
bgwrite(bg->saved_outen, BT848_GPIO_OUT_EN);
bgwrite(bg->saved_data & bg->saved_outen,
BT848_GPIO_DATA);
spin_unlock_irqrestore(&bg->lock, flags);
return 0;
}
#else
#define bt8xxgpio_suspend NULL
#define bt8xxgpio_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(bt8xxgpio_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT849) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT878) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT879) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, bt8xxgpio_pci_tbl);
static struct pci_driver bt8xxgpio_pci_driver = {
.name = "bt8xxgpio",
.id_table = bt8xxgpio_pci_tbl,
.probe = bt8xxgpio_probe,
.remove = bt8xxgpio_remove,
.suspend = bt8xxgpio_suspend,
.resume = bt8xxgpio_resume,
};
module_pci_driver(bt8xxgpio_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Buesch");
MODULE_DESCRIPTION("Abuse a BT8xx framegrabber card as generic GPIO card");
| gpl-2.0 |
shakalaca/ASUS_ZenFone_ZC451CG | linux/kernel/drivers/acpi/acpica/evgpeutil.c | 2299 | 11755 | /******************************************************************************
*
* Module Name: evgpeutil - GPE utilities
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*******************************************************************************
*
* FUNCTION: acpi_ev_walk_gpe_list
*
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
* context - Value passed to callback
*
* RETURN: Status
*
* DESCRIPTION: Walk the GPE lists.
*
******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Walk the interrupt level descriptor list */
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
/* One callback per GPE block */
status =
gpe_walk_callback(gpe_xrupt_info, gpe_block,
context);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_END) { /* Callback abort */
status = AE_OK;
}
goto unlock_and_exit;
}
gpe_block = gpe_block->next;
}
gpe_xrupt_info = gpe_xrupt_info->next;
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
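/*
 * Callback sketch (hypothetical name, not in ACPICA): a minimal
 * gpe_walk_callback suitable for acpi_ev_walk_gpe_list() above.  It only
 * counts GPE blocks and assumes "context" points at a u32.
 * acpi_ev_get_gpe_device() further down is the in-tree example of the
 * same pattern.
 */
static acpi_status
acpi_ev_count_gpe_blocks(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			 struct acpi_gpe_block_info *gpe_block, void *context)
{
	u32 *count = context;

	(*count)++;
	return (AE_OK);		/* AE_CTRL_END would stop the walk early */
}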
/*******************************************************************************
*
* FUNCTION: acpi_ev_valid_gpe_event
*
* PARAMETERS: gpe_event_info - Info for this GPE
*
* RETURN: TRUE if the gpe_event is valid
*
* DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
*
******************************************************************************/
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_xrupt_info *gpe_xrupt_block;
struct acpi_gpe_block_info *gpe_block;
ACPI_FUNCTION_ENTRY();
/* No need for spin lock since we are not changing any list elements */
/* Walk the GPE interrupt levels */
gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_block) {
gpe_block = gpe_xrupt_block->gpe_block_list_head;
/* Walk the GPE blocks on this interrupt level */
while (gpe_block) {
if ((&gpe_block->event_info[0] <= gpe_event_info) &&
(&gpe_block->event_info[gpe_block->gpe_count] >
gpe_event_info)) {
return (TRUE);
}
gpe_block = gpe_block->next;
}
gpe_xrupt_block = gpe_xrupt_block->next;
}
return (FALSE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_device
*
* PARAMETERS: GPE_WALK_CALLBACK
*
* RETURN: Status
*
* DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
* block device. NULL if the GPE is one of the FADT-defined GPEs.
*
******************************************************************************/
acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block, void *context)
{
struct acpi_gpe_device_info *info = context;
/* Increment Index by the number of GPEs in this block */
info->next_block_base_index += gpe_block->gpe_count;
if (info->index < info->next_block_base_index) {
/*
* The GPE index is within this block, get the node. Leave the node
* NULL for the FADT-defined GPEs
*/
if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
info->gpe_device = gpe_block->node;
}
info->status = AE_OK;
return (AE_CTRL_END);
}
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
* PARAMETERS: interrupt_number - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
* block per unique interrupt level used for GPEs. Should be
* called only when the GPE lists are semaphore locked and not
* subject to change.
*
******************************************************************************/
struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
/* No need for lock since we are not changing any list elements here */
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt) {
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
return_PTR(next_gpe_xrupt);
}
next_gpe_xrupt = next_gpe_xrupt->next;
}
/* Not found, must allocate a new xrupt descriptor */
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
return_PTR(NULL);
}
gpe_xrupt->interrupt_number = interrupt_number;
/* Install new interrupt descriptor with spin lock */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
if (acpi_gbl_gpe_xrupt_list_head) {
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt->next) {
next_gpe_xrupt = next_gpe_xrupt->next;
}
next_gpe_xrupt->next = gpe_xrupt;
gpe_xrupt->previous = next_gpe_xrupt;
} else {
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
status = acpi_os_install_interrupt_handler(interrupt_number,
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not install GPE interrupt handler at level 0x%X",
interrupt_number));
return_PTR(NULL);
}
}
return_PTR(gpe_xrupt);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_delete_gpe_xrupt
*
* PARAMETERS: gpe_xrupt - A GPE interrupt info block
*
* RETURN: Status
*
* DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
* interrupt handler if not the SCI interrupt.
*
******************************************************************************/
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
/* We never want to remove the SCI interrupt handler */
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
gpe_xrupt->gpe_block_list_head = NULL;
return_ACPI_STATUS(AE_OK);
}
/* Disable this interrupt */
status =
acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
acpi_ev_gpe_xrupt_handler);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Unlink the interrupt block with lock */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
if (gpe_xrupt->previous) {
gpe_xrupt->previous->next = gpe_xrupt->next;
} else {
/* No previous, update list head */
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
}
if (gpe_xrupt->next) {
gpe_xrupt->next->previous = gpe_xrupt->previous;
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
/* Free the block */
ACPI_FREE(gpe_xrupt);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_delete_gpe_handlers
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Delete all Handler objects found in the GPE data structs.
* Used only prior to termination.
*
******************************************************************************/
acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context)
{
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_gpe_notify_info *notify;
struct acpi_gpe_notify_info *next;
u32 i;
u32 j;
ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
gpe_event_info = &gpe_block->event_info[((acpi_size) i *
ACPI_GPE_REGISTER_WIDTH)
+ j];
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
/* Delete an installed handler block */
ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
} else
if ((gpe_event_info->
flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_NOTIFY) {
/* Delete the implicit notification device list */
notify = gpe_event_info->dispatch.notify_list;
while (notify) {
next = notify->next;
ACPI_FREE(notify);
notify = next;
}
gpe_event_info->dispatch.notify_list = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
}
}
}
return_ACPI_STATUS(AE_OK);
}
#endif /* !ACPI_REDUCED_HARDWARE */
| gpl-2.0 |
cm-a7lte/kernel_samsung_a7lte | drivers/acpi/acpica/utresrc.c | 2299 | 21565 | /*******************************************************************************
*
* Module Name: utresrc - Resource management utilities
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
*/
const char *acpi_gbl_bm_decode[] = {
"NotBusMaster",
"BusMaster"
};
const char *acpi_gbl_config_decode[] = {
"0 - Good Configuration",
"1 - Acceptable Configuration",
"2 - Suboptimal Configuration",
"3 - ***Invalid Configuration***",
};
const char *acpi_gbl_consume_decode[] = {
"ResourceProducer",
"ResourceConsumer"
};
const char *acpi_gbl_dec_decode[] = {
"PosDecode",
"SubDecode"
};
const char *acpi_gbl_he_decode[] = {
"Level",
"Edge"
};
const char *acpi_gbl_io_decode[] = {
"Decode10",
"Decode16"
};
const char *acpi_gbl_ll_decode[] = {
"ActiveHigh",
"ActiveLow"
};
const char *acpi_gbl_max_decode[] = {
"MaxNotFixed",
"MaxFixed"
};
const char *acpi_gbl_mem_decode[] = {
"NonCacheable",
"Cacheable",
"WriteCombining",
"Prefetchable"
};
const char *acpi_gbl_min_decode[] = {
"MinNotFixed",
"MinFixed"
};
const char *acpi_gbl_mtp_decode[] = {
"AddressRangeMemory",
"AddressRangeReserved",
"AddressRangeACPI",
"AddressRangeNVS"
};
const char *acpi_gbl_rng_decode[] = {
"InvalidRanges",
"NonISAOnlyRanges",
"ISAOnlyRanges",
"EntireRange"
};
const char *acpi_gbl_rw_decode[] = {
"ReadOnly",
"ReadWrite"
};
const char *acpi_gbl_shr_decode[] = {
"Exclusive",
"Shared",
"ExclusiveAndWake", /* ACPI 5.0 */
"SharedAndWake" /* ACPI 5.0 */
};
const char *acpi_gbl_siz_decode[] = {
"Transfer8",
"Transfer8_16",
"Transfer16",
"InvalidSize"
};
const char *acpi_gbl_trs_decode[] = {
"DenseTranslation",
"SparseTranslation"
};
const char *acpi_gbl_ttp_decode[] = {
"TypeStatic",
"TypeTranslation"
};
const char *acpi_gbl_typ_decode[] = {
"Compatibility",
"TypeA",
"TypeB",
"TypeF"
};
const char *acpi_gbl_ppc_decode[] = {
"PullDefault",
"PullUp",
"PullDown",
"PullNone"
};
const char *acpi_gbl_ior_decode[] = {
"IoRestrictionNone",
"IoRestrictionInputOnly",
"IoRestrictionOutputOnly",
"IoRestrictionNoneAndPreserve"
};
const char *acpi_gbl_dts_decode[] = {
"Width8bit",
"Width16bit",
"Width32bit",
"Width64bit",
"Width128bit",
"Width256bit",
};
/* GPIO connection type */
const char *acpi_gbl_ct_decode[] = {
"Interrupt",
"I/O"
};
/* Serial bus type */
const char *acpi_gbl_sbt_decode[] = {
"/* UNKNOWN serial bus type */",
"I2C",
"SPI",
"UART"
};
/* I2C serial bus access mode */
const char *acpi_gbl_am_decode[] = {
"AddressingMode7Bit",
"AddressingMode10Bit"
};
/* I2C serial bus slave mode */
const char *acpi_gbl_sm_decode[] = {
"ControllerInitiated",
"DeviceInitiated"
};
/* SPI serial bus wire mode */
const char *acpi_gbl_wm_decode[] = {
"FourWireMode",
"ThreeWireMode"
};
/* SPI serial clock phase */
const char *acpi_gbl_cph_decode[] = {
"ClockPhaseFirst",
"ClockPhaseSecond"
};
/* SPI serial bus clock polarity */
const char *acpi_gbl_cpo_decode[] = {
"ClockPolarityLow",
"ClockPolarityHigh"
};
/* SPI serial bus device polarity */
const char *acpi_gbl_dp_decode[] = {
"PolarityLow",
"PolarityHigh"
};
/* UART serial bus endian */
const char *acpi_gbl_ed_decode[] = {
"LittleEndian",
"BigEndian"
};
/* UART serial bus bits per byte */
const char *acpi_gbl_bpb_decode[] = {
"DataBitsFive",
"DataBitsSix",
"DataBitsSeven",
"DataBitsEight",
"DataBitsNine",
"/* UNKNOWN Bits per byte */",
"/* UNKNOWN Bits per byte */",
"/* UNKNOWN Bits per byte */"
};
/* UART serial bus stop bits */
const char *acpi_gbl_sb_decode[] = {
"StopBitsNone",
"StopBitsOne",
"StopBitsOnePlusHalf",
"StopBitsTwo"
};
/* UART serial bus flow control */
const char *acpi_gbl_fc_decode[] = {
"FlowControlNone",
"FlowControlHardware",
"FlowControlXON",
"/* UNKNOWN flow control keyword */"
};
/* UART serial bus parity type */
const char *acpi_gbl_pt_decode[] = {
"ParityTypeNone",
"ParityTypeEven",
"ParityTypeOdd",
"ParityTypeMark",
"ParityTypeSpace",
"/* UNKNOWN parity keyword */",
"/* UNKNOWN parity keyword */",
"/* UNKNOWN parity keyword */"
};
#endif
/*
* Base sizes of the raw AML resource descriptors, indexed by resource type.
* Zero indicates a reserved (and therefore invalid) resource type.
*/
const u8 acpi_gbl_resource_aml_sizes[] = {
/* Small descriptors */
0,
0,
0,
0,
ACPI_AML_SIZE_SMALL(struct aml_resource_irq),
ACPI_AML_SIZE_SMALL(struct aml_resource_dma),
ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
0,
0,
0,
ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small),
ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag),
/* Large descriptors */
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_memory24),
ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register),
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large),
ACPI_AML_SIZE_LARGE(struct aml_resource_memory32),
ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32),
ACPI_AML_SIZE_LARGE(struct aml_resource_address32),
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
};
const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
0,
ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
};
/*
* Resource types, used to validate the resource length field.
* The length of fixed-length types must match exactly, variable
* lengths must meet the minimum required length, etc.
* Zero indicates a reserved (and therefore invalid) resource type.
*/
static const u8 acpi_gbl_resource_types[] = {
/* Small descriptors */
0,
0,
0,
0,
ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
ACPI_FIXED_LENGTH, /* 05 DMA */
ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
ACPI_FIXED_LENGTH, /* 08 IO */
ACPI_FIXED_LENGTH, /* 09 fixed_IO */
ACPI_FIXED_LENGTH, /* 0A fixed_DMA */
0,
0,
0,
ACPI_VARIABLE_LENGTH, /* 0E vendor_short */
ACPI_FIXED_LENGTH, /* 0F end_tag */
/* Large descriptors */
0,
ACPI_FIXED_LENGTH, /* 01 Memory24 */
ACPI_FIXED_LENGTH, /* 02 generic_register */
0,
ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
ACPI_FIXED_LENGTH, /* 05 Memory32 */
ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
ACPI_VARIABLE_LENGTH, /* 08 Word* address */
ACPI_VARIABLE_LENGTH, /* 09 extended_IRQ */
ACPI_VARIABLE_LENGTH, /* 0A Qword* address */
ACPI_FIXED_LENGTH, /* 0B Extended* address */
ACPI_VARIABLE_LENGTH, /* 0C Gpio* */
0,
ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
*
* PARAMETERS: walk_state - Current walk info
* aml - Pointer to the raw AML resource template
* aml_length - Length of the entire template
* user_function - Called once for each descriptor found. If
* NULL, a pointer to the end_tag is returned
* context - Passed to user_function
*
* RETURN: Status
*
* DESCRIPTION: Walk a raw AML resource list(buffer). User function called
* once for each resource found.
*
******************************************************************************/
acpi_status
acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function, void **context)
{
acpi_status status;
u8 *end_aml;
u8 resource_index;
u32 length;
u32 offset = 0;
u8 end_tag[2] = { 0x79, 0x00 };
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
/* The absolute minimum resource template is one end_tag descriptor */
if (aml_length < sizeof(struct aml_resource_end_tag)) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Point to the end of the resource template buffer */
end_aml = aml + aml_length;
/* Walk the byte list, abort on any invalid descriptor type or length */
while (aml < end_aml) {
/* Validate the Resource Type and Resource Length */
status =
acpi_ut_validate_resource(walk_state, aml, &resource_index);
if (ACPI_FAILURE(status)) {
/*
* Exit on failure. Cannot continue because the descriptor length
* may be bogus also.
*/
return_ACPI_STATUS(status);
}
/* Get the length of this descriptor */
length = acpi_ut_get_descriptor_length(aml);
/* Invoke the user function */
if (user_function) {
status =
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* An end_tag descriptor terminates this resource template */
if (acpi_ut_get_resource_type(aml) ==
ACPI_RESOURCE_NAME_END_TAG) {
/*
* There must be at least one more byte in the buffer for
* the 2nd byte of the end_tag
*/
if ((aml + 1) >= end_aml) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Return the pointer to the end_tag if requested */
if (!user_function) {
*context = aml;
}
/* Normal exit */
return_ACPI_STATUS(AE_OK);
}
aml += length;
offset += length;
}
/* Did not find an end_tag descriptor */
if (user_function) {
/* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
(void)acpi_ut_validate_resource(walk_state, end_tag,
&resource_index);
status =
user_function(end_tag, 2, offset, resource_index, context);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_validate_resource
*
* PARAMETERS: walk_state - Current walk info
* aml - Pointer to the raw AML resource descriptor
* return_index - Where the resource index is returned. NULL
* if the index is not required.
*
* RETURN: Status, and optionally the Index into the global resource tables
*
* DESCRIPTION: Validate an AML resource descriptor by checking the Resource
* Type and Resource Length. Returns an index into the global
* resource information/dispatch tables for later use.
*
******************************************************************************/
acpi_status
acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
void *aml, u8 *return_index)
{
union aml_resource *aml_resource;
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
acpi_rs_length minimum_resource_length;
ACPI_FUNCTION_ENTRY();
/*
* 1) Validate the resource_type field (Byte 0)
*/
resource_type = ACPI_GET8(aml);
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
goto invalid_resource;
}
/*
* Large Resource Type -- bits 6:0 contain the name
* Translate range 0x80-0x8B to index range 0x10-0x1B
*/
resource_index = (u8) (resource_type - 0x70);
} else {
/*
* Small Resource Type -- bits 6:3 contain the name
* Shift range to index range 0x00-0x0F
*/
resource_index = (u8)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
/*
* Check validity of the resource type, via acpi_gbl_resource_types. Zero
* indicates an invalid resource.
*/
if (!acpi_gbl_resource_types[resource_index]) {
goto invalid_resource;
}
/*
* Validate the resource_length field. This ensures that the length
* is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
/* Validate based upon the type of resource - fixed length or variable */
switch (acpi_gbl_resource_types[resource_index]) {
case ACPI_FIXED_LENGTH:
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
goto bad_resource_length;
}
break;
case ACPI_VARIABLE_LENGTH:
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
goto bad_resource_length;
}
break;
case ACPI_SMALL_VARIABLE_LENGTH:
/* Small variable length resource, length can be (Min) or (Min-1) */
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
goto bad_resource_length;
}
break;
default:
/* Shouldn't happen (because of validation earlier), but be sure */
goto invalid_resource;
}
aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
/* Validate the bus_type field */
if ((aml_resource->common_serial_bus.type == 0) ||
(aml_resource->common_serial_bus.type >
AML_RESOURCE_MAX_SERIALBUSTYPE)) {
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
aml_resource->common_serial_bus.
type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
}
}
/* Optionally return the resource table index */
if (return_index) {
*return_index = resource_index;
}
return (AE_OK);
invalid_resource:
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid/unsupported resource descriptor: Type 0x%2.2X",
resource_type));
}
return (AE_AML_INVALID_RESOURCE_TYPE);
bad_resource_length:
if (walk_state) {
ACPI_ERROR((AE_INFO,
"Invalid resource descriptor length: Type "
"0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
resource_type, resource_length,
minimum_resource_length));
}
return (AE_AML_BAD_RESOURCE_LENGTH);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_type
*
* PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: The Resource Type with no extraneous bits (except the
* Large/Small descriptor bit -- this is left alone)
*
* DESCRIPTION: Extract the Resource Type/Name from the first byte of
* a resource descriptor.
*
******************************************************************************/
u8 acpi_ut_get_resource_type(void *aml)
{
ACPI_FUNCTION_ENTRY();
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
/* Large Resource Type -- bits 6:0 contain the name */
return (ACPI_GET8(aml));
} else {
/* Small Resource Type -- bits 6:3 contain the name */
return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_length
*
* PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte Length
*
* DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
* definition, this does not include the size of the descriptor
* header or the length field itself.
*
******************************************************************************/
u16 acpi_ut_get_resource_length(void *aml)
{
acpi_rs_length resource_length;
ACPI_FUNCTION_ENTRY();
/*
* Byte 0 contains the descriptor name (Resource Type)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
/* Large Resource type -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
} else {
/* Small Resource type -- bits 2:0 of byte 0 contain the length */
resource_length = (u16) (ACPI_GET8(aml) &
ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
}
return (resource_length);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_header_length
*
* PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Length of the AML header (depends on large/small descriptor)
*
* DESCRIPTION: Get the length of the header for this resource.
*
******************************************************************************/
u8 acpi_ut_get_resource_header_length(void *aml)
{
ACPI_FUNCTION_ENTRY();
/* Examine the large/small bit in the resource header */
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
return (sizeof(struct aml_resource_large_header));
} else {
return (sizeof(struct aml_resource_small_header));
}
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_descriptor_length
*
* PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte length
*
* DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
* length of the descriptor header and the length field itself.
* Used to walk descriptor lists.
*
******************************************************************************/
u32 acpi_ut_get_descriptor_length(void *aml)
{
ACPI_FUNCTION_ENTRY();
/*
* Get the Resource Length (does not include header length) and add
* the header length (depends on if this is a small or large resource)
*/
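/*
* Illustrative example: a small IRQ descriptor with a 1-byte header and a
* resource length of 2 has a total descriptor length of 3 bytes.
*/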
return (acpi_ut_get_resource_length(aml) +
acpi_ut_get_resource_header_length(aml));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_resource_end_tag
*
* PARAMETERS: obj_desc - The resource template buffer object
* end_tag - Where the pointer to the end_tag is returned
*
* RETURN: Status, pointer to the end tag
*
* DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
* Note: allows a buffer length of zero.
*
******************************************************************************/
acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
/* Allow a buffer length of zero */
if (!obj_desc->buffer.length) {
*end_tag = obj_desc->buffer.pointer;
return_ACPI_STATUS(AE_OK);
}
/* Validate the template and get a pointer to the end_tag */
status = acpi_ut_walk_aml_resources(NULL, obj_desc->buffer.pointer,
obj_desc->buffer.length, NULL,
(void **)end_tag);
return_ACPI_STATUS(status);
}
| gpl-2.0 |
JackpotClavin/android_kernel_samsung_venturi | drivers/media/video/uvc/uvc_isight.c | 3323 | 3953 | /*
* uvc_isight.c -- USB Video Class driver - iSight support
*
* Copyright (C) 2006-2007
* Ivan N. Zlatev <contact@i-nz.net>
* Copyright (C) 2008-2009
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include "uvcvideo.h"
/* Built-in iSight webcams implement most of UVC 1.0, except that they use a
* different packet format. Instead of sending a header at the
* beginning of each isochronous transfer payload, the webcam sends a
* single header per image (on its own in a packet), followed by
* packets containing data only.
*
* Offset Size (bytes) Description
* ------------------------------------------------------------------
* 0x00 1 Header length
* 0x01 1 Flags (UVC-compliant)
* 0x02 4 Always equal to '11223344'
* 0x06 8 Always equal to 'deadbeefdeadface'
* 0x0e 16 Unknown
*
* The header can be prefixed by an optional, unknown-purpose byte.
*/
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
const __u8 *data, unsigned int len)
{
static const __u8 hdr[] = {
0x11, 0x22, 0x33, 0x44,
0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xfa, 0xce
};
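/*
* These bytes are the '11223344' and 'deadbeefdeadface' magic values found
* at offsets 0x02 and 0x06 of the per-image header described above.
*/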
unsigned int maxlen, nbytes;
__u8 *mem;
int is_header = 0;
if (buf == NULL)
return 0;
if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
(len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
is_header = 1;
}
/* Synchronize to the input stream by waiting for a header packet. */
if (buf->state != UVC_BUF_STATE_ACTIVE) {
if (!is_header) {
uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
"sync).\n");
return 0;
}
buf->state = UVC_BUF_STATE_ACTIVE;
}
/* Mark the buffer as done if we're at the beginning of a new frame.
*
* Empty buffers (bytesused == 0) don't trigger end of frame detection
* as it doesn't make sense to return an empty buffer.
*/
if (is_header && buf->buf.bytesused != 0) {
buf->state = UVC_BUF_STATE_DONE;
return -EAGAIN;
}
/* Copy the video data to the buffer. Skip header packets, as they
* contain no data.
*/
if (!is_header) {
maxlen = buf->buf.length - buf->buf.bytesused;
mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
nbytes = min(len, maxlen);
memcpy(mem, data, nbytes);
buf->buf.bytesused += nbytes;
if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete "
"(overflow).\n");
buf->state = UVC_BUF_STATE_DONE;
}
}
return 0;
}
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf)
{
int ret, i;
for (i = 0; i < urb->number_of_packets; ++i) {
if (urb->iso_frame_desc[i].status < 0) {
uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
"lost (%d).\n",
urb->iso_frame_desc[i].status);
}
/* Decode the payload packet.
* uvc_video_decode is entered twice when a frame transition
* has been detected because the end of frame can only be
* reliably detected when the first packet of the new frame
* is processed. The first pass detects the transition and
* closes the previous frame's buffer, the second pass
* processes the data of the first payload of the new frame.
*/
do {
ret = isight_decode(&stream->queue, buf,
urb->transfer_buffer +
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
if (buf == NULL)
break;
if (buf->state == UVC_BUF_STATE_DONE ||
buf->state == UVC_BUF_STATE_ERROR)
buf = uvc_queue_next_buffer(&stream->queue,
buf);
} while (ret == -EAGAIN);
}
}
| gpl-2.0 |
Evervolv/android_kernel_oppo_msm8974 | drivers/connector/cn_proc.c | 4091 | 9716 | /*
* cn_proc.c - process events connector
*
* Copyright (C) Matt Helsley, IBM Corp. 2005
* Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
* Original copyright notice follows:
* Copyright (C) 2005 BULL SA.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/cn_proc.h>
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
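/*
* Event messages are built on the stack as a cn_msg header immediately
* followed by the proc_event payload, hence the combined buffer size.
*/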
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
{
preempt_disable();
*ts = __this_cpu_inc_return(proc_event_counts) - 1;
*cpu = smp_processor_id();
preempt_enable();
}
void proc_fork_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
parent = rcu_dereference(task->real_parent);
ev->event_data.fork.parent_pid = parent->pid;
ev->event_data.fork.parent_tgid = parent->tgid;
rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
/* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exec_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
__u8 buffer[CN_PROC_MSG_SIZE];
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_id_connector(struct task_struct *task, int which_id)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
const struct cred *cred;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
rcu_read_lock();
cred = __task_cred(task);
if (which_id == PROC_EVENT_UID) {
ev->event_data.id.r.ruid = cred->uid;
ev->event_data.id.e.euid = cred->euid;
} else if (which_id == PROC_EVENT_GID) {
ev->event_data.id.r.rgid = cred->gid;
ev->event_data.id.e.egid = cred->egid;
} else {
rcu_read_unlock();
return;
}
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_sid_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
__u8 buffer[CN_PROC_MSG_SIZE];
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
ev->event_data.sid.process_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
__u8 buffer[CN_PROC_MSG_SIZE];
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
ev->event_data.ptrace.process_tgid = task->tgid;
if (ptrace_id == PTRACE_ATTACH) {
ev->event_data.ptrace.tracer_pid = current->pid;
ev->event_data.ptrace.tracer_tgid = current->tgid;
} else if (ptrace_id == PTRACE_DETACH) {
ev->event_data.ptrace.tracer_pid = 0;
ev->event_data.ptrace.tracer_tgid = 0;
} else
return;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_comm_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
__u8 buffer[CN_PROC_MSG_SIZE];
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg *)buffer;
ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
ev->event_data.comm.process_tgid = task->tgid;
get_task_comm(ev->event_data.comm.comm, task);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
/*
* Send an acknowledgement message to userspace
*
* Use 0 for success, EFOO otherwise.
* Note: this is the negative of conventional kernel error
* values because it's not being returned via syscall return
* mechanisms.
*/
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
struct cn_msg *msg;
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
/**
* cn_proc_mcast_ctl
* @msg: message sent from userspace via the connector
* @nsp: netlink socket buffer parameters (unused here)
*/
static void cn_proc_mcast_ctl(struct cn_msg *msg,
struct netlink_skb_parms *nsp)
{
enum proc_cn_mcast_op *mc_op = NULL;
int err = 0;
if (msg->len != sizeof(*mc_op))
return;
mc_op = (enum proc_cn_mcast_op*)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
atomic_inc(&proc_event_num_listeners);
break;
case PROC_CN_MCAST_IGNORE:
atomic_dec(&proc_event_num_listeners);
break;
default:
err = EINVAL;
break;
}
cn_proc_ack(err, msg->seq, msg->ack);
}
/*
* cn_proc_init - initialization entry point
*
* Adds the connector callback to the connector driver.
*/
static int __init cn_proc_init(void)
{
int err;
if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
&cn_proc_mcast_ctl))) {
printk(KERN_WARNING "cn_proc failed to register\n");
return err;
}
return 0;
}
module_init(cn_proc_init);
| gpl-2.0 |
DirtyUnicorns/android_kernel_nvidia_shieldtablet | drivers/video/omap2/omapfb/omapfb-sysfs.c | 4347 | 12283 | /*
* linux/drivers/video/omap2/omapfb-sysfs.c
*
* Copyright (C) 2008 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/fb.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/omapfb.h>
#include <video/omapdss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
static ssize_t show_rotate_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type);
}
static ssize_t store_rotate_type(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_mem_region *rg;
int rot_type;
int r;
r = kstrtoint(buf, 0, &rot_type);
if (r)
return r;
if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
return -EINVAL;
if (!lock_fb_info(fbi))
return -ENODEV;
r = 0;
if (rot_type == ofbi->rotation_type)
goto out;
rg = omapfb_get_mem_region(ofbi->region);
if (rg->size) {
r = -EBUSY;
goto put_region;
}
ofbi->rotation_type = rot_type;
/*
* Since the VRAM for this FB is not allocated at the moment we don't
* need to do any further parameter checking at this point.
*/
put_region:
omapfb_put_mem_region(rg);
out:
unlock_fb_info(fbi);
return r ? r : count;
}
static ssize_t show_mirror(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror);
}
static ssize_t store_mirror(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
bool mirror;
int r;
struct fb_var_screeninfo new_var;
r = strtobool(buf, &mirror);
if (r)
return r;
if (!lock_fb_info(fbi))
return -ENODEV;
ofbi->mirror = mirror;
omapfb_get_mem_region(ofbi->region);
memcpy(&new_var, &fbi->var, sizeof(new_var));
r = check_fb_var(fbi, &new_var);
if (r)
goto out;
memcpy(&fbi->var, &new_var, sizeof(fbi->var));
set_fb_fix(fbi);
r = omapfb_apply_changes(fbi, 0);
if (r)
goto out;
r = count;
out:
omapfb_put_mem_region(ofbi->region);
unlock_fb_info(fbi);
return r;
}
static ssize_t show_overlays(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
ssize_t l = 0;
int t;
if (!lock_fb_info(fbi))
return -ENODEV;
omapfb_lock(fbdev);
for (t = 0; t < ofbi->num_overlays; t++) {
struct omap_overlay *ovl = ofbi->overlays[t];
int ovlnum;
for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum)
if (ovl == fbdev->overlays[ovlnum])
break;
l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
t == 0 ? "" : ",", ovlnum);
}
l += snprintf(buf + l, PAGE_SIZE - l, "\n");
omapfb_unlock(fbdev);
unlock_fb_info(fbi);
return l;
}
static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev,
struct omap_overlay *ovl)
{
int i, t;
for (i = 0; i < fbdev->num_fbs; i++) {
struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
for (t = 0; t < ofbi->num_overlays; t++) {
if (ofbi->overlays[t] == ovl)
return ofbi;
}
}
return NULL;
}
static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB];
struct omap_overlay *ovl;
int num_ovls, r, i;
int len;
bool added = false;
num_ovls = 0;
len = strlen(buf);
if (buf[len - 1] == '\n')
len = len - 1;
if (!lock_fb_info(fbi))
return -ENODEV;
omapfb_lock(fbdev);
if (len > 0) {
char *p = (char *)buf;
int ovlnum;
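/*
* The buffer is a list of overlay indices separated by single characters,
* e.g. "0,1" (illustrative) to attach overlays 0 and 1 to this framebuffer.
*/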
while (p < buf + len) {
int found;
if (num_ovls == OMAPFB_MAX_OVL_PER_FB) {
r = -EINVAL;
goto out;
}
ovlnum = simple_strtoul(p, &p, 0);
/* valid overlay indices are 0 .. num_overlays - 1 */
if (ovlnum >= fbdev->num_overlays) {
r = -EINVAL;
goto out;
}
found = 0;
for (i = 0; i < num_ovls; ++i) {
if (ovls[i] == fbdev->overlays[ovlnum]) {
found = 1;
break;
}
}
if (!found)
ovls[num_ovls++] = fbdev->overlays[ovlnum];
p++;
}
}
for (i = 0; i < num_ovls; ++i) {
struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]);
if (ofbi2 && ofbi2 != ofbi) {
dev_err(fbdev->dev, "overlay already in use\n");
r = -EINVAL;
goto out;
}
}
/* detach unused overlays */
for (i = 0; i < ofbi->num_overlays; ++i) {
int t, found;
ovl = ofbi->overlays[i];
found = 0;
for (t = 0; t < num_ovls; ++t) {
if (ovl == ovls[t]) {
found = 1;
break;
}
}
if (found)
continue;
DBG("detaching %d\n", ofbi->overlays[i]->id);
omapfb_get_mem_region(ofbi->region);
omapfb_overlay_enable(ovl, 0);
if (ovl->manager)
ovl->manager->apply(ovl->manager);
omapfb_put_mem_region(ofbi->region);
for (t = i + 1; t < ofbi->num_overlays; t++) {
ofbi->rotation[t-1] = ofbi->rotation[t];
ofbi->overlays[t-1] = ofbi->overlays[t];
}
ofbi->num_overlays--;
i--;
}
for (i = 0; i < num_ovls; ++i) {
int t, found;
ovl = ovls[i];
found = 0;
for (t = 0; t < ofbi->num_overlays; ++t) {
if (ovl == ofbi->overlays[t]) {
found = 1;
break;
}
}
if (found)
continue;
ofbi->rotation[ofbi->num_overlays] = 0;
ofbi->overlays[ofbi->num_overlays++] = ovl;
added = true;
}
if (added) {
omapfb_get_mem_region(ofbi->region);
r = omapfb_apply_changes(fbi, 0);
omapfb_put_mem_region(ofbi->region);
if (r)
goto out;
}
r = count;
out:
omapfb_unlock(fbdev);
unlock_fb_info(fbi);
return r;
}
static ssize_t show_overlays_rotate(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
ssize_t l = 0;
int t;
if (!lock_fb_info(fbi))
return -ENODEV;
for (t = 0; t < ofbi->num_overlays; t++) {
l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
t == 0 ? "" : ",", ofbi->rotation[t]);
}
l += snprintf(buf + l, PAGE_SIZE - l, "\n");
unlock_fb_info(fbi);
return l;
}
static ssize_t store_overlays_rotate(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
int num_ovls = 0, r, i;
int len;
bool changed = false;
u8 rotation[OMAPFB_MAX_OVL_PER_FB];
len = strlen(buf);
if (buf[len - 1] == '\n')
len = len - 1;
if (!lock_fb_info(fbi))
return -ENODEV;
if (len > 0) {
char *p = (char *)buf;
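/*
* Expect one rotation value (0-3) per attached overlay, separated by
* single characters, e.g. "0,2" for two overlays (illustrative).
*/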
while (p < buf + len) {
int rot;
if (num_ovls == ofbi->num_overlays) {
r = -EINVAL;
goto out;
}
rot = simple_strtoul(p, &p, 0);
if (rot < 0 || rot > 3) {
r = -EINVAL;
goto out;
}
if (ofbi->rotation[num_ovls] != rot)
changed = true;
rotation[num_ovls++] = rot;
p++;
}
}
if (num_ovls != ofbi->num_overlays) {
r = -EINVAL;
goto out;
}
if (changed) {
for (i = 0; i < num_ovls; ++i)
ofbi->rotation[i] = rotation[i];
omapfb_get_mem_region(ofbi->region);
r = omapfb_apply_changes(fbi, 0);
omapfb_put_mem_region(ofbi->region);
if (r)
goto out;
/* FIXME error handling? */
}
r = count;
out:
unlock_fb_info(fbi);
return r;
}
static ssize_t show_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region->size);
}
static ssize_t store_size(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display = fb2display(fbi);
struct omapfb2_mem_region *rg;
unsigned long size;
int r;
int i;
r = kstrtoul(buf, 0, &size);
if (r)
return r;
size = PAGE_ALIGN(size);
if (!lock_fb_info(fbi))
return -ENODEV;
if (display && display->driver->sync)
display->driver->sync(display);
rg = ofbi->region;
down_write_nested(&rg->lock, rg->id);
atomic_inc(&rg->lock_count);
if (atomic_read(&rg->map_count)) {
r = -EBUSY;
goto out;
}
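/*
* The region cannot be resized while any framebuffer sharing it still has
* an enabled overlay; check all framebuffers that use this region.
*/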
for (i = 0; i < fbdev->num_fbs; i++) {
struct omapfb_info *ofbi2 = FB2OFB(fbdev->fbs[i]);
int j;
if (ofbi2->region != rg)
continue;
for (j = 0; j < ofbi2->num_overlays; j++) {
struct omap_overlay *ovl;
ovl = ofbi2->overlays[j];
if (ovl->is_enabled(ovl)) {
r = -EBUSY;
goto out;
}
}
}
if (size != ofbi->region->size) {
r = omapfb_realloc_fbmem(fbi, size, ofbi->region->type);
if (r) {
dev_err(dev, "realloc fbmem failed\n");
goto out;
}
}
r = count;
out:
atomic_dec(&rg->lock_count);
up_write(&rg->lock);
unlock_fb_info(fbi);
return r;
}
static ssize_t show_phys(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region->paddr);
}
static ssize_t show_virt(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct omapfb_info *ofbi = FB2OFB(fbi);
return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
}
static ssize_t show_upd_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
enum omapfb_update_mode mode;
int r;
r = omapfb_get_update_mode(fbi, &mode);
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
}
static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
unsigned mode;
int r;
r = kstrtouint(buf, 0, &mode);
if (r)
return r;
r = omapfb_set_update_mode(fbi, mode);
if (r)
return r;
return count;
}
static struct device_attribute omapfb_attrs[] = {
__ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
store_rotate_type),
__ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror),
__ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size),
__ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays),
__ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate,
store_overlays_rotate),
__ATTR(phys_addr, S_IRUGO, show_phys, NULL),
__ATTR(virt_addr, S_IRUGO, show_virt, NULL),
__ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode),
};
int omapfb_create_sysfs(struct omapfb2_device *fbdev)
{
int i;
int r;
DBG("create sysfs for fbs\n");
for (i = 0; i < fbdev->num_fbs; i++) {
int t;
for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) {
r = device_create_file(fbdev->fbs[i]->dev,
&omapfb_attrs[t]);
if (r) {
dev_err(fbdev->dev, "failed to create sysfs "
"file\n");
return r;
}
}
}
return 0;
}
void omapfb_remove_sysfs(struct omapfb2_device *fbdev)
{
int i, t;
DBG("remove sysfs for fbs\n");
for (i = 0; i < fbdev->num_fbs; i++) {
for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++)
device_remove_file(fbdev->fbs[i]->dev,
&omapfb_attrs[t]);
}
}
| gpl-2.0 |
Alex-V2/One_M8_4.4.3_kernel | drivers/staging/nvec/nvec_ps2.c | 4859 | 4135 | /*
* nvec_ps2: mouse driver for a NVIDIA compliant embedded controller
*
* Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
*
* Authors: Pierre-Hugues Husson <phhusson@free.fr>
* Ilya Petrov <ilya.muromec@gmail.com>
* Marc Dietrich <marvin24@gmx.de>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serio.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include "nvec.h"
#define START_STREAMING {'\x06', '\x03', '\x06'}
#define STOP_STREAMING {'\x06', '\x04'}
#define SEND_COMMAND {'\x06', '\x01', '\xf4', '\x01'}
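/*
* The leading 0x06 byte appears to address the EC's PS/2 (AUX) channel;
* the remaining bytes are the sub-command and its arguments. The 0xf4
* placeholder in SEND_COMMAND is overwritten with the real PS/2 command
* in ps2_sendcommand() before the request is sent.
*/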
#ifdef NVEC_PS2_DEBUG
#define NVEC_PHD(str, buf, len) \
print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_NONE, \
16, 1, buf, len, false)
#else
#define NVEC_PHD(str, buf, len)
#endif
static const unsigned char MOUSE_RESET[] = {'\x06', '\x01', '\xff', '\x03'};
struct nvec_ps2 {
struct serio *ser_dev;
struct notifier_block notifier;
struct nvec_chip *nvec;
};
static struct nvec_ps2 ps2_dev;
static int ps2_startstreaming(struct serio *ser_dev)
{
unsigned char buf[] = START_STREAMING;
return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static void ps2_stopstreaming(struct serio *ser_dev)
{
unsigned char buf[] = STOP_STREAMING;
nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static int ps2_sendcommand(struct serio *ser_dev, unsigned char cmd)
{
unsigned char buf[] = SEND_COMMAND;
buf[2] = cmd & 0xff;
dev_dbg(&ser_dev->dev, "Sending ps2 cmd %02x\n", cmd);
return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
}
static int nvec_ps2_notifier(struct notifier_block *nb,
unsigned long event_type, void *data)
{
int i;
unsigned char *msg = (unsigned char *)data;
switch (event_type) {
case NVEC_PS2_EVT:
for (i = 0; i < msg[1]; i++)
serio_interrupt(ps2_dev.ser_dev, msg[2 + i], 0);
NVEC_PHD("ps/2 mouse event: ", &msg[2], msg[1]);
return NOTIFY_STOP;
case NVEC_PS2:
if (msg[2] == 1) {
for (i = 0; i < (msg[1] - 2); i++)
serio_interrupt(ps2_dev.ser_dev, msg[i + 4], 0);
NVEC_PHD("ps/2 mouse reply: ", &msg[4], msg[1] - 2);
} else if (msg[1] != 2) /* !ack */
NVEC_PHD("unhandled mouse event: ", msg, msg[1] + 2);
return NOTIFY_STOP;
}
return NOTIFY_DONE;
}
static int __devinit nvec_mouse_probe(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
struct serio *ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!ser_dev)
return -ENOMEM;
ser_dev->id.type = SERIO_PS_PSTHRU;
ser_dev->write = ps2_sendcommand;
ser_dev->start = ps2_startstreaming;
ser_dev->stop = ps2_stopstreaming;
strlcpy(ser_dev->name, "nvec mouse", sizeof(ser_dev->name));
strlcpy(ser_dev->phys, "nvec", sizeof(ser_dev->phys));
ps2_dev.ser_dev = ser_dev;
ps2_dev.notifier.notifier_call = nvec_ps2_notifier;
ps2_dev.nvec = nvec;
nvec_register_notifier(nvec, &ps2_dev.notifier, 0);
serio_register_port(ser_dev);
/* mouse reset */
nvec_write_async(nvec, MOUSE_RESET, 4);
return 0;
}
static int nvec_mouse_suspend(struct platform_device *pdev, pm_message_t state)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
/* disable mouse */
nvec_write_async(nvec, "\x06\xf4", 2);
/* send cancel autoreceive */
nvec_write_async(nvec, "\x06\x04", 2);
return 0;
}
static int nvec_mouse_resume(struct platform_device *pdev)
{
struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
ps2_startstreaming(ps2_dev.ser_dev);
/* enable mouse */
nvec_write_async(nvec, "\x06\xf5", 2);
return 0;
}
static struct platform_driver nvec_mouse_driver = {
.probe = nvec_mouse_probe,
.suspend = nvec_mouse_suspend,
.resume = nvec_mouse_resume,
.driver = {
.name = "nvec-mouse",
.owner = THIS_MODULE,
},
};
static int __init nvec_mouse_init(void)
{
return platform_driver_register(&nvec_mouse_driver);
}
module_init(nvec_mouse_init);
MODULE_DESCRIPTION("NVEC mouse driver");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ztemt/NX505J_5.1_kernel | drivers/net/ethernet/intel/ixgbevf/vf.c | 4859 | 12780 | /*******************************************************************************
Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "vf.h"
#include "ixgbevf.h"
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware by filling the bus info structure and media type, clears
* all on chip counters, initializes receive address registers, multicast
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
return 0;
}
/**
* ixgbevf_init_hw_vf - virtual function hardware initialization
* @hw: pointer to hardware structure
*
* Initialize the hardware by resetting the hardware and then starting
* the hardware
**/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
s32 status = hw->mac.ops.start_hw(hw);
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
return status;
}
/**
* ixgbevf_reset_hw_vf - Performs hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
**/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw) && timeout) {
timeout--;
udelay(5);
}
if (!timeout)
return IXGBE_ERR_RESET_FAILED;
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1);
msleep(10);
/* set our "perm_addr" based on info provided by PF */
/* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3 */
ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
}
/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
u32 number_of_queues;
u32 reg_val;
u16 i;
/*
* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
hw->adapter_stopped = true;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
if (reg_val & IXGBE_RXDCTL_ENABLE) {
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
}
IXGBE_WRITE_FLUSH(hw);
/* Clear interrupt mask to stop from interrupts being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
/* Clear any pending interrupts */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
/* Disable the transmit unit. Each queue must be disabled. */
number_of_queues = hw->mac.max_tx_queues;
for (i = 0; i < number_of_queues; i++) {
reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
if (reg_val & IXGBE_TXDCTL_ENABLE) {
reg_val &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
}
}
return 0;
}
/**
* ixgbevf_mta_vector - Determines bit-vector in multicast table to set
* @hw: pointer to hardware structure
* @mc_addr: the multicast address
*
* Extracts 12 bits from a multicast address to determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits of
* incoming rx multicast addresses to determine the bit-vector to check in
* the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
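/*
* Illustrative example: with mc_filter_type 0, the multicast address
* 01:00:5e:00:00:01 (mc_addr[4] = 0x00, mc_addr[5] = 0x01) hashes to
* vector = (0x00 >> 4) | (0x01 << 4) = 0x010.
*/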
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
break;
case 1: /* use bits [46:35] of the address */
vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
break;
case 2: /* use bits [45:34] of the address */
vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
break;
case 3: /* use bits [43:32] of the address */
vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
break;
default: /* Invalid mc_filter_type */
break;
}
/* vector can only be 12-bits or boundary will be exceeded */
vector &= 0xFFF;
return vector;
}
/**
* ixgbevf_get_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
* @mac_addr: pointer to storage for retrieved MAC address
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
return 0;
}
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
/*
* If index is one then this is the start of a new list and needs
* indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
if (!ret_val)
if (msgbuf[0] ==
(IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
ret_val = -ENOMEM;
return ret_val;
}
/**
* ixgbevf_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @vmdq: Unused in this implementation
**/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
u32 vmdq)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return ret_val;
}
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 retmsg[IXGBE_VFMAILBOX_SIZE];
s32 retval = mbx->ops.write_posted(hw, msg, size);
if (!retval)
mbx->ops.read_posted(hw, retmsg, size);
}
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @netdev: pointer to net device structure
*
* Updates the Multicast Table Array.
**/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 cnt, i;
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
* msg type). That's 30 hash values if we pack 'em right. If
* there are more than 30 MC addresses to add then punt the
* extras for now and then add code to handle more than 30 later.
* It would be unusual for a server to request that many multicast
* addresses except in large enterprise network environments.
*/
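/*
* IXGBE_VFMAILBOX_SIZE is believed to be 16 32-bit words (64 bytes), i.e.
* 32 16-bit slots; reserving the first dword for the message type leaves
* the 30 hash-value slots mentioned above.
*/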
cnt = netdev_mc_count(netdev);
if (cnt > 30)
cnt = 30;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
i = 0;
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
/**
* ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
**/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
return 0;
}
/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
* @autoneg: Unused in this implementation
* @autoneg_wait_to_complete: Unused in this implementation
*
* Do nothing and return success. VF drivers are not allowed to change
* global settings. Maintained for driver compatibility.
**/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
return 0;
}
/**
* ixgbevf_check_mac_link_vf - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true is link is up, false otherwise
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool autoneg_wait_to_complete)
{
u32 links_reg;
if (!(hw->mbx.ops.check_for_rst(hw))) {
*link_up = false;
*speed = 0;
return -1;
}
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
*link_up = true;
else
*link_up = false;
if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
IXGBE_LINKS_SPEED_10G_82599)
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return 0;
}
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
.get_mac_addr = ixgbevf_get_mac_addr_vf,
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
| gpl-2.0 |
ivanich/senny_kernel-3.4 | drivers/char/tpm/tpm_tis.c | 4859 | 22637 | /*
* Copyright (C) 2005, 2006 IBM Corporation
*
* Authors:
* Leendert van Doorn <leendert@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* Device driver for TCG/TCPA TPM (trusted platform module).
* Specifications at www.trustedcomputinggroup.org
*
* This device driver implements the TPM interface as defined in
* the TCG TPM Interface Spec version 1.2, revision 1.0.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"
enum tis_access {
TPM_ACCESS_VALID = 0x80,
TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
TPM_ACCESS_REQUEST_PENDING = 0x04,
TPM_ACCESS_REQUEST_USE = 0x02,
};
enum tis_status {
TPM_STS_VALID = 0x80,
TPM_STS_COMMAND_READY = 0x40,
TPM_STS_GO = 0x20,
TPM_STS_DATA_AVAIL = 0x10,
TPM_STS_DATA_EXPECT = 0x08,
};
enum tis_int_flags {
TPM_GLOBAL_INT_ENABLE = 0x80000000,
TPM_INTF_BURST_COUNT_STATIC = 0x100,
TPM_INTF_CMD_READY_INT = 0x080,
TPM_INTF_INT_EDGE_FALLING = 0x040,
TPM_INTF_INT_EDGE_RISING = 0x020,
TPM_INTF_INT_LEVEL_LOW = 0x010,
TPM_INTF_INT_LEVEL_HIGH = 0x008,
TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
TPM_INTF_STS_VALID_INT = 0x002,
TPM_INTF_DATA_AVAIL_INT = 0x001,
};
enum tis_defaults {
TIS_MEM_BASE = 0xFED40000,
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
};
#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
#define TPM_STS(l) (0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
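/*
* Each TIS locality has its own 4 KB register bank, hence the ((l) << 12)
* offset added to every register address above.
*/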
static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
struct acpi_device *acpi = pnp_acpi_device(dev);
struct acpi_hardware_id *id;
list_for_each_entry(id, &acpi->pnp.ids, list) {
if (!strcmp("INTC0102", id->id))
return 1;
}
return 0;
}
#else
static inline int is_itpm(struct pnp_dev *dev)
{
return 0;
}
#endif
static int check_locality(struct tpm_chip *chip, int l)
{
if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
return chip->vendor.locality = l;
return -1;
}
static void release_locality(struct tpm_chip *chip, int l, int force)
{
if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
(TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
(TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
chip->vendor.iobase + TPM_ACCESS(l));
}
static int request_locality(struct tpm_chip *chip, int l)
{
unsigned long stop, timeout;
long rc;
if (check_locality(chip, l) >= 0)
return l;
iowrite8(TPM_ACCESS_REQUEST_USE,
chip->vendor.iobase + TPM_ACCESS(l));
stop = jiffies + chip->vendor.timeout_a;
if (chip->vendor.irq) {
again:
timeout = stop - jiffies;
if ((long)timeout <= 0)
return -1;
rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
(check_locality
(chip, l) >= 0),
timeout);
if (rc > 0)
return l;
if (rc == -ERESTARTSYS && freezing(current)) {
clear_thread_flag(TIF_SIGPENDING);
goto again;
}
} else {
/* wait for burstcount */
do {
if (check_locality(chip, l) >= 0)
return l;
msleep(TPM_TIMEOUT);
}
while (time_before(jiffies, stop));
}
return -1;
}
static u8 tpm_tis_status(struct tpm_chip *chip)
{
return ioread8(chip->vendor.iobase +
TPM_STS(chip->vendor.locality));
}
static void tpm_tis_ready(struct tpm_chip *chip)
{
/* this causes the current command to be aborted */
iowrite8(TPM_STS_COMMAND_READY,
chip->vendor.iobase + TPM_STS(chip->vendor.locality));
}
static int get_burstcount(struct tpm_chip *chip)
{
unsigned long stop;
int burstcnt;
/* wait for burstcount */
/* which timeout value, spec has 2 answers (c & d) */
stop = jiffies + chip->vendor.timeout_d;
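/* The burst count occupies bits 8-23 of the STS register, read below as two separate bytes */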
do {
burstcnt = ioread8(chip->vendor.iobase +
TPM_STS(chip->vendor.locality) + 1);
burstcnt += ioread8(chip->vendor.iobase +
TPM_STS(chip->vendor.locality) +
2) << 8;
if (burstcnt)
return burstcnt;
msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -EBUSY;
}
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0, burstcnt;
while (size < count &&
wait_for_tpm_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
chip->vendor.timeout_c,
&chip->vendor.read_queue)
== 0) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && size < count; burstcnt--)
buf[size++] = ioread8(chip->vendor.iobase +
TPM_DATA_FIFO(chip->vendor.
locality));
}
return size;
}
static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
int expected, status;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
dev_err(chip->dev, "Unable to read header\n");
goto out;
}
expected = be32_to_cpu(*(__be32 *) (buf + 2));
if (expected > count) {
size = -EIO;
goto out;
}
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
dev_err(chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
out:
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
return size;
}
static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc, status, burstcnt;
size_t count = 0;
if (request_locality(chip, 0) < 0)
return -EBUSY;
status = tpm_tis_status(chip);
if ((status & TPM_STS_COMMAND_READY) == 0) {
tpm_tis_ready(chip);
if (wait_for_tpm_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
&chip->vendor.int_queue) < 0) {
rc = -ETIME;
goto out_err;
}
}
while (count < len - 1) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && count < len - 1; burstcnt--) {
iowrite8(buf[count], chip->vendor.iobase +
TPM_DATA_FIFO(chip->vendor.locality));
count++;
}
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
goto out_err;
}
}
/* write last byte */
iowrite8(buf[count],
chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
goto out_err;
}
return 0;
out_err:
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
return rc;
}
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc;
u32 ordinal;
rc = tpm_tis_send_data(chip, buf, len);
if (rc < 0)
return rc;
/* go and do it */
iowrite8(TPM_STS_GO,
chip->vendor.iobase + TPM_STS(chip->vendor.locality));
if (chip->vendor.irq) {
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
if (wait_for_tpm_stat
(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
&chip->vendor.read_queue) < 0) {
rc = -ETIME;
goto out_err;
}
}
return len;
out_err:
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
return rc;
}
/*
* Early probing for iTPM with STS_DATA_EXPECT flaw.
* Try sending command without itpm flag set and if that
* fails, repeat with itpm flag set.
*/
static int probe_itpm(struct tpm_chip *chip)
{
int rc = 0;
u8 cmd_getticks[] = {
0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0xf1
};
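/*
* cmd_getticks is a 10-byte TPM_ORD_GetTicks request (tag 0x00c1, length
* 0x0a, ordinal 0xf1), assumed harmless and used here only to probe how the
* chip reports TPM_STS_DATA_EXPECT.
*/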
size_t len = sizeof(cmd_getticks);
bool rem_itpm = itpm;
u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
/* probe only iTPMS */
if (vendor != TPM_VID_INTEL)
return 0;
itpm = 0;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0)
goto out;
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
itpm = 1;
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
dev_info(chip->dev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
out:
itpm = rem_itpm;
tpm_tis_ready(chip);
release_locality(chip, chip->vendor.locality, 0);
return rc;
}
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = tpm_open,
.read = tpm_read,
.write = tpm_write,
.release = tpm_release,
};
static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
&dev_attr_pcrs.attr,
&dev_attr_enabled.attr,
&dev_attr_active.attr,
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
&dev_attr_cancel.attr,
&dev_attr_durations.attr,
&dev_attr_timeouts.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
.attrs = tis_attrs
};
static struct tpm_vendor_specific tpm_tis = {
.status = tpm_tis_status,
.recv = tpm_tis_recv,
.send = tpm_tis_send,
.cancel = tpm_tis_ready,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = TPM_STS_COMMAND_READY,
.attr_group = &tis_attr_grp,
.miscdev = {
.fops = &tis_ops,},
};
static irqreturn_t tis_int_probe(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
u32 interrupt;
interrupt = ioread32(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
if (interrupt == 0)
return IRQ_NONE;
chip->vendor.probed_irq = irq;
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
return IRQ_HANDLED;
}
static irqreturn_t tis_int_handler(int dummy, void *dev_id)
{
struct tpm_chip *chip = dev_id;
u32 interrupt;
int i;
interrupt = ioread32(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
if (interrupt == 0)
return IRQ_NONE;
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
wake_up_interruptible(&chip->vendor.read_queue);
if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
for (i = 0; i < 5; i++)
if (check_locality(chip, i) >= 0)
break;
if (interrupt &
(TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
TPM_INTF_CMD_READY_INT))
wake_up_interruptible(&chip->vendor.int_queue);
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
return IRQ_HANDLED;
}
static bool interrupts = true;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
static int tpm_tis_init(struct device *dev, resource_size_t start,
resource_size_t len, unsigned int irq)
{
u32 vendor, intfcaps, intmask;
int rc, i, irq_s, irq_e, probe;
struct tpm_chip *chip;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
return -ENODEV;
chip->vendor.iobase = ioremap(start, len);
if (!chip->vendor.iobase) {
rc = -EIO;
goto out_err;
}
/* Default timeouts */
chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
if (request_locality(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
}
vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
dev_info(dev,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
if (!itpm) {
probe = probe_itpm(chip);
if (probe < 0) {
rc = -ENODEV;
goto out_err;
}
itpm = (probe == 0) ? 0 : 1;
}
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
/* Figure out the capabilities */
intfcaps =
ioread32(chip->vendor.iobase +
TPM_INTF_CAPS(chip->vendor.locality));
dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
intfcaps);
if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
dev_dbg(dev, "\tBurst Count Static\n");
if (intfcaps & TPM_INTF_CMD_READY_INT)
dev_dbg(dev, "\tCommand Ready Int Support\n");
if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
dev_dbg(dev, "\tInterrupt Edge Falling\n");
if (intfcaps & TPM_INTF_INT_EDGE_RISING)
dev_dbg(dev, "\tInterrupt Edge Rising\n");
if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
dev_dbg(dev, "\tInterrupt Level Low\n");
if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
dev_dbg(dev, "\tInterrupt Level High\n");
if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
dev_dbg(dev, "\tLocality Change Int Support\n");
if (intfcaps & TPM_INTF_STS_VALID_INT)
dev_dbg(dev, "\tSts Valid Int Support\n");
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
/* get the timeouts before testing for irqs */
if (tpm_get_timeouts(chip)) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
goto out_err;
}
if (tpm_do_selftest(chip)) {
dev_err(dev, "TPM self test failed\n");
rc = -ENODEV;
goto out_err;
}
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
intmask =
ioread32(chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT;
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
if (interrupts)
chip->vendor.irq = irq;
if (interrupts && !chip->vendor.irq) {
irq_s =
ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (irq_s) {
irq_e = irq_s;
} else {
irq_s = 3;
irq_e = 15;
}
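/* Probe candidate IRQs: for each line, install a temporary handler
 * (tis_int_probe), ask the TPM to generate an interrupt via
 * tpm_gen_interrupt(), and keep the first line on which the probe
 * handler actually fired (recorded in chip->vendor.probed_irq).
 */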
for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (request_irq
(i, tis_int_probe, IRQF_SHARED,
chip->vendor.miscdev.name, chip) != 0) {
dev_info(chip->dev,
"Unable to request irq: %d for probe\n",
i);
continue;
}
/* Clear all existing */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn on */
iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
chip->vendor.probed_irq = 0;
/* Generate Interrupts */
tpm_gen_interrupt(chip);
chip->vendor.irq = chip->vendor.probed_irq;
/* free_irq will call into tis_int_probe;
clear all irqs we haven't seen while doing
tpm_gen_interrupt */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn off */
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
free_irq(i, chip);
}
}
if (chip->vendor.irq) {
iowrite8(chip->vendor.irq,
chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
if (request_irq
(chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->vendor.miscdev.name, chip) != 0) {
dev_info(chip->dev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;
} else {
/* Clear all existing */
iowrite32(ioread32
(chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality)),
chip->vendor.iobase +
TPM_INT_STATUS(chip->vendor.locality));
/* Turn on */
iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
}
}
INIT_LIST_HEAD(&chip->vendor.list);
mutex_lock(&tis_lock);
list_add(&chip->vendor.list, &tis_chips);
mutex_unlock(&tis_lock);
return 0;
out_err:
if (chip->vendor.iobase)
iounmap(chip->vendor.iobase);
tpm_remove_hardware(chip->dev);
return rc;
}
static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
{
u32 intmask;
/* reenable interrupts that device may have lost or
BIOS/firmware may have disabled */
iowrite8(chip->vendor.irq, chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
intmask =
ioread32(chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
iowrite32(intmask,
chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
}
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
{
resource_size_t start, len;
unsigned int irq = 0;
start = pnp_mem_start(pnp_dev, 0);
len = pnp_mem_len(pnp_dev, 0);
if (pnp_irq_valid(pnp_dev, 0))
irq = pnp_irq(pnp_dev, 0);
else
interrupts = 0;
if (is_itpm(pnp_dev))
itpm = 1;
return tpm_tis_init(&pnp_dev->dev, start, len, irq);
}
static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
return tpm_pm_suspend(&dev->dev, msg);
}
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
int ret;
if (chip->vendor.irq)
tpm_tis_reenable_interrupts(chip);
ret = tpm_pm_resume(&dev->dev);
if (!ret)
tpm_do_selftest(chip);
return ret;
}
static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
{"IFX0102", 0}, /* Infineon */
{"BCM0101", 0}, /* Broadcom */
{"BCM0102", 0}, /* Broadcom */
{"NSC1200", 0}, /* National */
{"ICO0102", 0}, /* Intel */
/* Add new here */
{"", 0}, /* User Specified */
{"", 0} /* Terminator */
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
tpm_dev_vendor_release(chip);
kfree(chip);
}
static struct pnp_driver tis_pnp_driver = {
.name = "tpm_tis",
.id_table = tpm_pnp_tbl,
.probe = tpm_tis_pnp_init,
.suspend = tpm_tis_pnp_suspend,
.resume = tpm_tis_pnp_resume,
.remove = tpm_tis_pnp_remove,
};
#define TIS_HID_USR_IDX (sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
{
return tpm_pm_suspend(&dev->dev, msg);
}
static int tpm_tis_resume(struct platform_device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
if (chip->vendor.irq)
tpm_tis_reenable_interrupts(chip);
return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
.owner = THIS_MODULE,
},
.suspend = tpm_tis_suspend,
.resume = tpm_tis_resume,
};
static struct platform_device *pdev;
static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
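/* Module init: with the default force=0 the driver relies on PNP/ACPI
 * enumeration (tis_pnp_driver) when CONFIG_PNP is set; otherwise a
 * platform device is registered directly at the fixed TIS_MEM_BASE
 * window and probed via tpm_tis_init().
 */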
static int __init init_tis(void)
{
int rc;
#ifdef CONFIG_PNP
if (!force)
return pnp_register_driver(&tis_pnp_driver);
#endif
rc = platform_driver_register(&tis_drv);
if (rc < 0)
return rc;
pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
if (IS_ERR(pdev)) {
platform_driver_unregister(&tis_drv);
return PTR_ERR(pdev);
}
if ((rc = tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
platform_device_unregister(pdev);
platform_driver_unregister(&tis_drv);
}
return rc;
}
static void __exit cleanup_tis(void)
{
struct tpm_vendor_specific *i, *j;
struct tpm_chip *chip;
mutex_lock(&tis_lock);
list_for_each_entry_safe(i, j, &tis_chips, list) {
chip = to_tpm_chip(i);
tpm_remove_hardware(chip->dev);
iowrite32(~TPM_GLOBAL_INT_ENABLE &
ioread32(chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.
locality)),
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
release_locality(chip, chip->vendor.locality, 1);
if (chip->vendor.irq)
free_irq(chip->vendor.irq, chip);
iounmap(i->iobase);
list_del(&i->list);
}
mutex_unlock(&tis_lock);
#ifdef CONFIG_PNP
if (!force) {
pnp_unregister_driver(&tis_pnp_driver);
return;
}
#endif
platform_device_unregister(pdev);
platform_driver_unregister(&tis_drv);
}
module_init(init_tis);
module_exit(cleanup_tis);
MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
MODULE_DESCRIPTION("TPM Driver");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kumajaya/android_kernel_samsung_espresso10 | drivers/misc/ioc4.c | 7931 | 14731 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
/* This file contains the master driver module for use by SGI IOC4 subdrivers.
*
* It allocates any resources shared between multiple subdevices, and
* provides accessor functions (where needed) and the like for those
* resources. It also provides a mechanism for the subdevice modules
* to support loading and unloading.
*
* Non-shared resources (e.g. external interrupt A_INT_OUT register page
* alias, serial port and UART registers) are handled by the subdevice
* modules themselves.
*
* This is all necessary because IOC4 is not implemented as a multi-function
* PCI device, but an amalgamation of disparate registers for several
* types of device (ATA, serial, external interrupts). The normal
* resource management in the kernel doesn't have quite the right interfaces
* to handle this situation (e.g. multiple modules can't claim the same
* PCI ID), thus this IOC4 master module.
*/
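/* Illustrative sketch, not taken from any real subdriver: a subdevice
 * module would typically fill in a struct ioc4_submodule with its
 * probe/remove callbacks and hand it to ioc4_register_submodule(), e.g.
 *
 *	static struct ioc4_submodule ioc4_foo_submodule = {
 *		.is_owner  = THIS_MODULE,
 *		.is_probe  = ioc4_foo_probe,	   (called for each IOC4 found)
 *		.is_remove = ioc4_foo_remove,
 *	};
 *
 *	ioc4_register_submodule(&ioc4_foo_submodule);
 *
 * The ioc4_foo_* names are hypothetical; only the fields this file
 * actually touches (is_owner, is_probe, is_remove, is_list) are assumed.
 */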
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioc4.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <asm/io.h>
/***************
* Definitions *
***************/
/* Tweakable values */
/* PCI bus speed detection/calibration */
#define IOC4_CALIBRATE_COUNT 63 /* Calibration cycle period */
#define IOC4_CALIBRATE_CYCLES 256 /* Average over this many cycles */
#define IOC4_CALIBRATE_DISCARD 2 /* Discard first few cycles */
#define IOC4_CALIBRATE_LOW_MHZ 25 /* Lower bound on bus speed sanity */
#define IOC4_CALIBRATE_HIGH_MHZ 75 /* Upper bound on bus speed sanity */
#define IOC4_CALIBRATE_DEFAULT_MHZ 66 /* Assumed if sanity check fails */
/************************
* Submodule management *
************************/
static DEFINE_MUTEX(ioc4_mutex);
static LIST_HEAD(ioc4_devices);
static LIST_HEAD(ioc4_submodules);
/* Register an IOC4 submodule */
int
ioc4_register_submodule(struct ioc4_submodule *is)
{
struct ioc4_driver_data *idd;
mutex_lock(&ioc4_mutex);
list_add(&is->is_list, &ioc4_submodules);
/* Initialize submodule for each IOC4 */
if (!is->is_probe)
goto out;
list_for_each_entry(idd, &ioc4_devices, idd_list) {
if (is->is_probe(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s probe failed "
"for pci_dev %s",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
out:
mutex_unlock(&ioc4_mutex);
return 0;
}
/* Unregister an IOC4 submodule */
void
ioc4_unregister_submodule(struct ioc4_submodule *is)
{
struct ioc4_driver_data *idd;
mutex_lock(&ioc4_mutex);
list_del(&is->is_list);
/* Remove submodule for each IOC4 */
if (!is->is_remove)
goto out;
list_for_each_entry(idd, &ioc4_devices, idd_list) {
if (is->is_remove(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule %s remove failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
out:
mutex_unlock(&ioc4_mutex);
}
/*********************
* Device management *
*********************/
#define IOC4_CALIBRATE_LOW_LIMIT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
#define IOC4_CALIBRATE_HIGH_LIMIT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
#define IOC4_CALIBRATE_DEFAULT \
(1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
#define IOC4_CALIBRATE_END \
(IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
#define IOC4_INT_OUT_MODE_TOGGLE 0x7 /* Toggle INT_OUT every COUNT+1 ticks */
/* Determines external interrupt output clock period of the PCI bus an
* IOC4 is attached to. This value can be used to determine the PCI
* bus speed.
*
* IOC4 has a design feature that various internal timers are derived from
* the PCI bus clock. This causes IOC4 device drivers to need to take the
* bus speed into account when setting various register values (e.g. INT_OUT
* register COUNT field, UART divisors, etc). Since this information is
* needed by several subdrivers, it is determined by the main IOC4 driver,
* even though the following code utilizes external interrupt registers
* to perform the speed calculation.
*/
static void __devinit
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
union ioc4_gpcr gpcr;
unsigned int state, last_state = 1;
struct timespec start_ts, end_ts;
uint64_t start, end, period;
unsigned int count = 0;
/* Enable output */
gpcr.raw = 0;
gpcr.fields.dir = IOC4_GPCR_DIR_0;
gpcr.fields.int_out_en = 1;
writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
/* Reset to power-on state */
writel(0, &idd->idd_misc_regs->int_out.raw);
mmiowb();
/* Set up square wave */
int_out.raw = 0;
int_out.fields.count = IOC4_CALIBRATE_COUNT;
int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
int_out.fields.diag = 0;
writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
mmiowb();
/* Check square wave period averaged over some number of cycles */
do {
int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
state = int_out.fields.int_out;
if (!last_state && state) {
count++;
if (count == IOC4_CALIBRATE_END) {
ktime_get_ts(&end_ts);
break;
} else if (count == IOC4_CALIBRATE_DISCARD)
ktime_get_ts(&start_ts);
}
last_state = state;
} while (1);
/* Calculation rearranged to preserve intermediate precision.
* Logically:
* 1. "end - start" gives us the measurement period over all
* the square wave cycles.
* 2. Divide by number of square wave cycles to get the period
* of a square wave cycle.
* 3. Divide by 2*(int_out.fields.count+1), which is the formula
* by which the IOC4 generates the square wave, to get the
* period of an IOC4 INT_OUT count.
*/
end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
period = (end - start) /
(IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
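/* Worked example (illustrative numbers only): with IOC4_CALIBRATE_COUNT
 * of 63, one square-wave cycle spans 2 * (63 + 1) = 128 INT_OUT counts,
 * and the measurement window covers IOC4_CALIBRATE_CYCLES = 256 cycles,
 * i.e. 256 * 128 = 32768 counts; so "period" above is simply the
 * measured nanoseconds divided by 32768, giving ns per INT_OUT count.
 */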
/* Bounds check the result. */
if (period > IOC4_CALIBRATE_LOW_LIMIT ||
period < IOC4_CALIBRATE_HIGH_LIMIT) {
printk(KERN_INFO
"IOC4 %s: Clock calibration failed. Assuming"
"PCI clock is %d ns.\n",
pci_name(idd->idd_pdev),
IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
period = IOC4_CALIBRATE_DEFAULT;
} else {
u64 ns = period;
do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
printk(KERN_DEBUG
"IOC4 %s: PCI clock is %llu ns.\n",
pci_name(idd->idd_pdev), (unsigned long long)ns);
}
/* Remember results. We store the extint clock period rather
* than the PCI clock period so that greater precision is
* retained. Divide by IOC4_EXTINT_COUNT_DIVISOR to get
* PCI clock period.
*/
idd->count_period = period;
}
/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
* Each brings out different combinations of IOC4 signals, so the
* IOC4 subdrivers need to know which variant we're attached to.
*
* We look for the presence of a SCSI (IO9) or SATA (IO10) controller
* on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
* If neither is present, it's a PCI-RT.
*/
static unsigned int __devinit
ioc4_variant(struct ioc4_driver_data *idd)
{
struct pci_dev *pdev = NULL;
int found = 0;
/* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
do {
pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
if (pdev &&
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
} while (pdev && !found);
if (NULL != pdev) {
pci_dev_put(pdev);
return IOC4_VARIANT_IO9;
}
/* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
pdev = NULL;
do {
pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
if (pdev &&
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
} while (pdev && !found);
if (NULL != pdev) {
pci_dev_put(pdev);
return IOC4_VARIANT_IO10;
}
/* PCI-RT: No SCSI/SATA controller will be present */
return IOC4_VARIANT_PCI_RT;
}
static void
ioc4_load_modules(struct work_struct *work)
{
request_module("sgiioc4");
}
static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
/* Adds a new instance of an IOC4 card */
static int __devinit
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc4_driver_data *idd;
struct ioc4_submodule *is;
uint32_t pcmd;
int ret;
/* Enable IOC4 and take ownership of it */
if ((ret = pci_enable_device(pdev))) {
printk(KERN_WARNING
"%s: Failed to enable IOC4 device for pci_dev %s.\n",
__func__, pci_name(pdev));
goto out;
}
pci_set_master(pdev);
/* Set up per-IOC4 data */
idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
if (!idd) {
printk(KERN_WARNING
"%s: Failed to allocate IOC4 data for pci_dev %s.\n",
__func__, pci_name(pdev));
ret = -ENODEV;
goto out_idd;
}
idd->idd_pdev = pdev;
idd->idd_pci_id = pci_id;
/* Map IOC4 misc registers. These are shared between subdevices
* so the main IOC4 module manages them.
*/
idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
if (!idd->idd_bar0) {
printk(KERN_WARNING
"%s: Unable to find IOC4 misc resource "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_pci;
}
if (!request_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
"ioc4_misc")) {
printk(KERN_WARNING
"%s: Unable to request IOC4 misc region "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_pci;
}
idd->idd_misc_regs = ioremap(idd->idd_bar0,
sizeof(struct ioc4_misc_regs));
if (!idd->idd_misc_regs) {
printk(KERN_WARNING
"%s: Unable to remap IOC4 misc region "
"for pci_dev %s.\n",
__func__, pci_name(idd->idd_pdev));
ret = -ENODEV;
goto out_misc_region;
}
/* Failsafe portion of per-IOC4 initialization */
/* Detect card variant */
idd->idd_variant = ioc4_variant(idd);
printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
/* Initialize IOC4 */
pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
/* Determine PCI clock */
ioc4_clock_calibrate(idd);
/* Disable/clear all interrupts. Need to do this here lest
* one submodule request the shared IOC4 IRQ, but interrupt
* is generated by a different subdevice.
*/
/* Disable */
writel(~0, &idd->idd_misc_regs->other_iec.raw);
writel(~0, &idd->idd_misc_regs->sio_iec);
/* Clear (i.e. acknowledge) */
writel(~0, &idd->idd_misc_regs->other_ir.raw);
writel(~0, &idd->idd_misc_regs->sio_ir);
/* Track PCI-device specific data */
idd->idd_serial_data = NULL;
pci_set_drvdata(idd->idd_pdev, idd);
mutex_lock(&ioc4_mutex);
list_add_tail(&idd->idd_list, &ioc4_devices);
/* Add this IOC4 to all submodules */
list_for_each_entry(is, &ioc4_submodules, is_list) {
if (is->is_probe && is->is_probe(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule 0x%s probe failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
mutex_unlock(&ioc4_mutex);
/* Request sgiioc4 IDE driver on boards that bring that functionality
* off of IOC4. The root filesystem may be hosted on a drive connected
* to IOC4, so we need to make sure the sgiioc4 driver is loaded as it
* won't be picked up by modprobes due to the ioc4 module owning the
* PCI device.
*/
if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
/* Request the module from a work procedure as the modprobe
* goes out to a userland helper and that will hang if done
* directly from ioc4_probe().
*/
printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
schedule_work(&ioc4_load_modules_work);
}
return 0;
out_misc_region:
release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
out_pci:
kfree(idd);
out_idd:
pci_disable_device(pdev);
out:
return ret;
}
/* Removes a particular instance of an IOC4 card. */
static void __devexit
ioc4_remove(struct pci_dev *pdev)
{
struct ioc4_submodule *is;
struct ioc4_driver_data *idd;
idd = pci_get_drvdata(pdev);
/* Remove this IOC4 from all submodules */
mutex_lock(&ioc4_mutex);
list_for_each_entry(is, &ioc4_submodules, is_list) {
if (is->is_remove && is->is_remove(idd)) {
printk(KERN_WARNING
"%s: IOC4 submodule 0x%s remove failed "
"for pci_dev %s.\n",
__func__, module_name(is->is_owner),
pci_name(idd->idd_pdev));
}
}
mutex_unlock(&ioc4_mutex);
/* Release resources */
iounmap(idd->idd_misc_regs);
if (!idd->idd_bar0) {
printk(KERN_WARNING
"%s: Unable to get IOC4 misc mapping for pci_dev %s. "
"Device removal may be incomplete.\n",
__func__, pci_name(idd->idd_pdev));
}
release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
/* Disable IOC4 and relinquish */
pci_disable_device(pdev);
/* Remove and free driver data */
mutex_lock(&ioc4_mutex);
list_del(&idd->idd_list);
mutex_unlock(&ioc4_mutex);
kfree(idd);
}
static struct pci_device_id ioc4_id_table[] = {
{PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
{0}
};
static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
.remove = __devexit_p(ioc4_remove),
};
MODULE_DEVICE_TABLE(pci, ioc4_id_table);
/*********************
* Module management *
*********************/
/* Module load */
static int __init
ioc4_init(void)
{
return pci_register_driver(&ioc4_driver);
}
/* Module unload */
static void __exit
ioc4_exit(void)
{
/* Ensure ioc4_load_modules() has completed before exiting */
flush_work_sync(&ioc4_load_modules_work);
pci_unregister_driver(&ioc4_driver);
}
module_init(ioc4_init);
module_exit(ioc4_exit);
MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ioc4_register_submodule);
EXPORT_SYMBOL(ioc4_unregister_submodule);
| gpl-2.0 |
Mirenk/android_kernel_htc_msm8974 | sound/pci/ac97/ac97_patch.c | 7931 | 129899 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.2
* by Intel Corporation (http://developer.intel.com) and to datasheets
* for specific codecs.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "ac97_local.h"
#include "ac97_patch.h"
/*
* Forward declarations
*/
static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97,
const char *name);
static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name,
const unsigned int *tlv, const char **slaves);
/*
* Chip specific initialization
*/
static int patch_build_controls(struct snd_ac97 * ac97, const struct snd_kcontrol_new *controls, int count)
{
int idx, err;
for (idx = 0; idx < count; idx++)
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&controls[idx], ac97))) < 0)
return err;
return 0;
}
/* replace with a new TLV */
static void reset_tlv(struct snd_ac97 *ac97, const char *name,
const unsigned int *tlv)
{
struct snd_ctl_elem_id sid;
struct snd_kcontrol *kctl;
memset(&sid, 0, sizeof(sid));
strcpy(sid.name, name);
sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
kctl = snd_ctl_find_id(ac97->bus->card, &sid);
if (kctl && kctl->tlv.p)
kctl->tlv.p = tlv;
}
/* set to the page, update bits and restore the page */
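/* The page_mutex keeps the select/update/restore sequence atomic with
 * respect to other paged accesses, so a concurrent register access never
 * observes the temporarily selected page.
 */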
static int ac97_update_bits_page(struct snd_ac97 *ac97, unsigned short reg, unsigned short mask, unsigned short value, unsigned short page)
{
unsigned short page_save;
int ret;
mutex_lock(&ac97->page_mutex);
page_save = snd_ac97_read(ac97, AC97_INT_PAGING) & AC97_PAGE_MASK;
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page);
ret = snd_ac97_update_bits(ac97, reg, mask, value);
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, page_save);
mutex_unlock(&ac97->page_mutex); /* unlock paging */
return ret;
}
/*
* shared line-in/mic controls
*/
static int ac97_enum_text_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo,
const char **texts, unsigned int nums)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = nums;
if (uinfo->value.enumerated.item > nums - 1)
uinfo->value.enumerated.item = nums - 1;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int ac97_surround_jack_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char *texts[] = { "Shared", "Independent" };
return ac97_enum_text_info(kcontrol, uinfo, texts, 2);
}
static int ac97_surround_jack_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = ac97->indep_surround;
return 0;
}
static int ac97_surround_jack_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned char indep = !!ucontrol->value.enumerated.item[0];
if (indep != ac97->indep_surround) {
ac97->indep_surround = indep;
if (ac97->build_ops->update_jacks)
ac97->build_ops->update_jacks(ac97);
return 1;
}
return 0;
}
static int ac97_channel_mode_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static const char *texts[] = { "2ch", "4ch", "6ch", "8ch" };
return ac97_enum_text_info(kcontrol, uinfo, texts,
kcontrol->private_value);
}
static int ac97_channel_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = ac97->channel_mode;
return 0;
}
static int ac97_channel_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned char mode = ucontrol->value.enumerated.item[0];
if (mode >= kcontrol->private_value)
return -EINVAL;
if (mode != ac97->channel_mode) {
ac97->channel_mode = mode;
if (ac97->build_ops->update_jacks)
ac97->build_ops->update_jacks(ac97);
return 1;
}
return 0;
}
#define AC97_SURROUND_JACK_MODE_CTL \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = "Surround Jack Mode", \
.info = ac97_surround_jack_mode_info, \
.get = ac97_surround_jack_mode_get, \
.put = ac97_surround_jack_mode_put, \
}
/* 6ch */
#define AC97_CHANNEL_MODE_CTL \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = "Channel Mode", \
.info = ac97_channel_mode_info, \
.get = ac97_channel_mode_get, \
.put = ac97_channel_mode_put, \
.private_value = 3, \
}
/* 4ch */
#define AC97_CHANNEL_MODE_4CH_CTL \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = "Channel Mode", \
.info = ac97_channel_mode_info, \
.get = ac97_channel_mode_get, \
.put = ac97_channel_mode_put, \
.private_value = 2, \
}
/* 8ch */
#define AC97_CHANNEL_MODE_8CH_CTL \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = "Channel Mode", \
.info = ac97_channel_mode_info, \
.get = ac97_channel_mode_get, \
.put = ac97_channel_mode_put, \
.private_value = 4, \
}
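/* channel_mode indexes the "Channel Mode" enum above (0 = 2ch, 1 = 4ch,
 * 2 = 6ch, 3 = 8ch), so surround outputs are active from 4ch upwards and
 * center/LFE from 6ch upwards; the helpers below encode exactly that.
 */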
static inline int is_surround_on(struct snd_ac97 *ac97)
{
return ac97->channel_mode >= 1;
}
static inline int is_clfe_on(struct snd_ac97 *ac97)
{
return ac97->channel_mode >= 2;
}
/* system has shared jacks with surround out enabled */
static inline int is_shared_surrout(struct snd_ac97 *ac97)
{
return !ac97->indep_surround && is_surround_on(ac97);
}
/* system has shared jacks with center/lfe out enabled */
static inline int is_shared_clfeout(struct snd_ac97 *ac97)
{
return !ac97->indep_surround && is_clfe_on(ac97);
}
/* system has shared jacks with line in enabled */
static inline int is_shared_linein(struct snd_ac97 *ac97)
{
return !ac97->indep_surround && !is_surround_on(ac97);
}
/* system has shared jacks with mic in enabled */
static inline int is_shared_micin(struct snd_ac97 *ac97)
{
return !ac97->indep_surround && !is_clfe_on(ac97);
}
static inline int alc850_is_aux_back_surround(struct snd_ac97 *ac97)
{
return is_surround_on(ac97);
}
/* The following snd_ac97_ymf753_... items added by David Shust (dshust@shustring.com) */
/* Modified for YMF743 by Keita Maehara <maehara@debian.org> */
/* It is possible to indicate to the Yamaha YMF7x3 the type of
speakers being used. */
static int snd_ac97_ymf7x3_info_speaker(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static char *texts[3] = {
"Standard", "Small", "Smaller"
};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 3;
if (uinfo->value.enumerated.item > 2)
uinfo->value.enumerated.item = 2;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ymf7x3_get_speaker(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_YMF7X3_3D_MODE_SEL];
val = (val >> 10) & 3;
if (val > 0) /* 0 = invalid */
val--;
ucontrol->value.enumerated.item[0] = val;
return 0;
}
static int snd_ac97_ymf7x3_put_speaker(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 2)
return -EINVAL;
val = (ucontrol->value.enumerated.item[0] + 1) << 10;
return snd_ac97_update(ac97, AC97_YMF7X3_3D_MODE_SEL, val);
}
static const struct snd_kcontrol_new snd_ac97_ymf7x3_controls_speaker =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "3D Control - Speaker",
.info = snd_ac97_ymf7x3_info_speaker,
.get = snd_ac97_ymf7x3_get_speaker,
.put = snd_ac97_ymf7x3_put_speaker,
};
/* It is possible to indicate to the Yamaha YMF7x3 the source to
direct to the S/PDIF output. */
static int snd_ac97_ymf7x3_spdif_source_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static char *texts[2] = { "AC-Link", "A/D Converter" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 2;
if (uinfo->value.enumerated.item > 1)
uinfo->value.enumerated.item = 1;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ymf7x3_spdif_source_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_YMF7X3_DIT_CTRL];
ucontrol->value.enumerated.item[0] = (val >> 1) & 1;
return 0;
}
static int snd_ac97_ymf7x3_spdif_source_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 1)
return -EINVAL;
val = ucontrol->value.enumerated.item[0] << 1;
return snd_ac97_update_bits(ac97, AC97_YMF7X3_DIT_CTRL, 0x0002, val);
}
static int patch_yamaha_ymf7x3_3d(struct snd_ac97 *ac97)
{
struct snd_kcontrol *kctl;
int err;
kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97);
err = snd_ctl_add(ac97->bus->card, kctl);
if (err < 0)
return err;
strcpy(kctl->id.name, "3D Control - Wide");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 9, 7, 0);
snd_ac97_write_cache(ac97, AC97_3D_CONTROL, 0x0000);
err = snd_ctl_add(ac97->bus->card,
snd_ac97_cnew(&snd_ac97_ymf7x3_controls_speaker,
ac97));
if (err < 0)
return err;
snd_ac97_write_cache(ac97, AC97_YMF7X3_3D_MODE_SEL, 0x0c00);
return 0;
}
static const struct snd_kcontrol_new snd_ac97_yamaha_ymf743_controls_spdif[3] =
{
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("", PLAYBACK, SWITCH),
AC97_YMF7X3_DIT_CTRL, 0, 1, 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, NONE) "Source",
.info = snd_ac97_ymf7x3_spdif_source_info,
.get = snd_ac97_ymf7x3_spdif_source_get,
.put = snd_ac97_ymf7x3_spdif_source_put,
},
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("", NONE, NONE) "Mute",
AC97_YMF7X3_DIT_CTRL, 2, 1, 1)
};
static int patch_yamaha_ymf743_build_spdif(struct snd_ac97 *ac97)
{
int err;
err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3);
if (err < 0)
return err;
err = patch_build_controls(ac97,
snd_ac97_yamaha_ymf743_controls_spdif, 3);
if (err < 0)
return err;
/* set default PCM S/PDIF params */
/* PCM audio,no copyright,no preemphasis,PCM coder,original */
snd_ac97_write_cache(ac97, AC97_YMF7X3_DIT_CTRL, 0xa201);
return 0;
}
static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
.build_spdif = patch_yamaha_ymf743_build_spdif,
.build_3d = patch_yamaha_ymf7x3_3d,
};
static int patch_yamaha_ymf743(struct snd_ac97 *ac97)
{
ac97->build_ops = &patch_yamaha_ymf743_ops;
ac97->caps |= AC97_BC_BASS_TREBLE;
ac97->caps |= 0x04 << 10; /* Yamaha 3D enhancement */
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000; /* 48k only */
ac97->ext_id |= AC97_EI_SPDIF; /* force the detection of spdif */
return 0;
}
/* The AC'97 spec states that the S/PDIF signal is to be output at pin 48.
The YMF753 will output the S/PDIF signal to pin 43, 47 (EAPD), or 48.
By default, no output pin is selected, and the S/PDIF signal is not output.
There is also a bit to mute S/PDIF output in a vendor-specific register. */
static int snd_ac97_ymf753_spdif_output_pin_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[3] = { "Disabled", "Pin 43", "Pin 48" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 3;
if (uinfo->value.enumerated.item > 2)
uinfo->value.enumerated.item = 2;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ymf753_spdif_output_pin_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_YMF7X3_DIT_CTRL];
ucontrol->value.enumerated.item[0] = (val & 0x0008) ? 2 : (val & 0x0020) ? 1 : 0;
return 0;
}
static int snd_ac97_ymf753_spdif_output_pin_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 2)
return -EINVAL;
val = (ucontrol->value.enumerated.item[0] == 2) ? 0x0008 :
(ucontrol->value.enumerated.item[0] == 1) ? 0x0020 : 0;
return snd_ac97_update_bits(ac97, AC97_YMF7X3_DIT_CTRL, 0x0028, val);
/* The following can be used to direct S/PDIF output to pin 47 (EAPD).
snd_ac97_write_cache(ac97, 0x62, snd_ac97_read(ac97, 0x62) | 0x0008); */
}
static const struct snd_kcontrol_new snd_ac97_ymf753_controls_spdif[3] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
.info = snd_ac97_ymf7x3_spdif_source_info,
.get = snd_ac97_ymf7x3_spdif_source_get,
.put = snd_ac97_ymf7x3_spdif_source_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Output Pin",
.info = snd_ac97_ymf753_spdif_output_pin_info,
.get = snd_ac97_ymf753_spdif_output_pin_get,
.put = snd_ac97_ymf753_spdif_output_pin_put,
},
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("", NONE, NONE) "Mute",
AC97_YMF7X3_DIT_CTRL, 2, 1, 1)
};
static int patch_yamaha_ymf753_post_spdif(struct snd_ac97 * ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_ymf753_controls_spdif, ARRAY_SIZE(snd_ac97_ymf753_controls_spdif))) < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
.build_3d = patch_yamaha_ymf7x3_3d,
.build_post_spdif = patch_yamaha_ymf753_post_spdif
};
static int patch_yamaha_ymf753(struct snd_ac97 * ac97)
{
/* Patch for Yamaha YMF753, Copyright (c) by David Shust, dshust@shustring.com.
This chip has nonstandard and extended behaviour with regard to its S/PDIF output.
The AC'97 spec states that the S/PDIF signal is to be output at pin 48.
The YMF753 will output the S/PDIF signal to pin 43, 47 (EAPD), or 48.
By default, no output pin is selected, and the S/PDIF signal is not output.
There is also a bit to mute S/PDIF output in a vendor-specific register.
*/
ac97->build_ops = &patch_yamaha_ymf753_ops;
ac97->caps |= AC97_BC_BASS_TREBLE;
ac97->caps |= 0x04 << 10; /* Yamaha 3D enhancement */
return 0;
}
/*
* May 2, 2003 Liam Girdwood <lrg@slimlogic.co.uk>
* removed broken wolfson00 patch.
* added support for WM9705,WM9708,WM9709,WM9710,WM9711,WM9712 and WM9717.
*/
static const struct snd_kcontrol_new wm97xx_snd_ac97_controls[] = {
AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
};
static int patch_wolfson_wm9703_specific(struct snd_ac97 * ac97)
{
/* This is known to work for the ViewSonic ViewPad 1000
* Randolph Bentson <bentson@holmsjoen.com>
* WM9703/9707/9708/9717
*/
int err, i;
for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
return 0;
}
static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
.build_specific = patch_wolfson_wm9703_specific,
};
static int patch_wolfson03(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_wolfson_wm9703_ops;
return 0;
}
static const struct snd_kcontrol_new wm9704_snd_ac97_controls[] = {
AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
AC97_DOUBLE("Rear Playback Volume", AC97_WM9704_RMIXER_VOL, 8, 0, 31, 1),
AC97_SINGLE("Rear Playback Switch", AC97_WM9704_RMIXER_VOL, 15, 1, 1),
AC97_DOUBLE("Rear DAC Volume", AC97_WM9704_RPCM_VOL, 8, 0, 31, 1),
AC97_DOUBLE("Surround Volume", AC97_SURROUND_MASTER, 8, 0, 31, 1),
};
static int patch_wolfson_wm9704_specific(struct snd_ac97 * ac97)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(wm9704_snd_ac97_controls); i++) {
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9704_snd_ac97_controls[i], ac97))) < 0)
return err;
}
/* patch for DVD noise */
snd_ac97_write_cache(ac97, AC97_WM9704_TEST, 0x0200);
return 0;
}
static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
.build_specific = patch_wolfson_wm9704_specific,
};
static int patch_wolfson04(struct snd_ac97 * ac97)
{
/* WM9704M/9704Q */
ac97->build_ops = &patch_wolfson_wm9704_ops;
return 0;
}
static int patch_wolfson05(struct snd_ac97 * ac97)
{
/* WM9705, WM9710 */
ac97->build_ops = &patch_wolfson_wm9703_ops;
#ifdef CONFIG_TOUCHSCREEN_WM9705
/* WM9705 touchscreen uses AUX and VIDEO for touch */
ac97->flags |= AC97_HAS_NO_VIDEO | AC97_HAS_NO_AUX;
#endif
return 0;
}
static const char* wm9711_alc_select[] = {"None", "Left", "Right", "Stereo"};
static const char* wm9711_alc_mix[] = {"Stereo", "Right", "Left", "None"};
static const char* wm9711_out3_src[] = {"Left", "VREF", "Left + Right", "Mono"};
static const char* wm9711_out3_lrsrc[] = {"Master Mix", "Headphone Mix"};
static const char* wm9711_rec_adc[] = {"Stereo", "Left", "Right", "Mute"};
static const char* wm9711_base[] = {"Linear Control", "Adaptive Boost"};
static const char* wm9711_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
static const char* wm9711_mic[] = {"Mic 1", "Differential", "Mic 2", "Stereo"};
static const char* wm9711_rec_sel[] =
{"Mic 1", "NC", "NC", "Master Mix", "Line", "Headphone Mix", "Phone Mix", "Phone"};
static const char* wm9711_ng_type[] = {"Constant Gain", "Mute"};
static const struct ac97_enum wm9711_enum[] = {
AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9711_alc_select),
AC97_ENUM_SINGLE(AC97_VIDEO, 10, 4, wm9711_alc_mix),
AC97_ENUM_SINGLE(AC97_AUX, 9, 4, wm9711_out3_src),
AC97_ENUM_SINGLE(AC97_AUX, 8, 2, wm9711_out3_lrsrc),
AC97_ENUM_SINGLE(AC97_REC_SEL, 12, 4, wm9711_rec_adc),
AC97_ENUM_SINGLE(AC97_MASTER_TONE, 15, 2, wm9711_base),
AC97_ENUM_DOUBLE(AC97_REC_GAIN, 14, 6, 2, wm9711_rec_gain),
AC97_ENUM_SINGLE(AC97_MIC, 5, 4, wm9711_mic),
AC97_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 8, wm9711_rec_sel),
AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9711_ng_type),
};
static const struct snd_kcontrol_new wm9711_snd_ac97_controls[] = {
AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
AC97_SINGLE("ALC Decay Time", AC97_CODEC_CLASS_REV, 4, 15, 0),
AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
AC97_ENUM("ALC Function", wm9711_enum[0]),
AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 1),
AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 1),
AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
AC97_ENUM("ALC NG Type", wm9711_enum[9]),
AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 1),
AC97_SINGLE("Side Tone Switch", AC97_VIDEO, 15, 1, 1),
AC97_SINGLE("Side Tone Volume", AC97_VIDEO, 12, 7, 1),
AC97_ENUM("ALC Headphone Mux", wm9711_enum[1]),
AC97_SINGLE("ALC Headphone Volume", AC97_VIDEO, 7, 7, 1),
AC97_SINGLE("Out3 Switch", AC97_AUX, 15, 1, 1),
AC97_SINGLE("Out3 ZC Switch", AC97_AUX, 7, 1, 0),
AC97_ENUM("Out3 Mux", wm9711_enum[2]),
AC97_ENUM("Out3 LR Mux", wm9711_enum[3]),
AC97_SINGLE("Out3 Volume", AC97_AUX, 0, 31, 1),
AC97_SINGLE("Beep to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
AC97_SINGLE("Beep to Headphone Volume", AC97_PC_BEEP, 12, 7, 1),
AC97_SINGLE("Beep to Side Tone Switch", AC97_PC_BEEP, 11, 1, 1),
AC97_SINGLE("Beep to Side Tone Volume", AC97_PC_BEEP, 8, 7, 1),
AC97_SINGLE("Beep to Phone Switch", AC97_PC_BEEP, 7, 1, 1),
AC97_SINGLE("Beep to Phone Volume", AC97_PC_BEEP, 4, 7, 1),
AC97_SINGLE("Aux to Headphone Switch", AC97_CD, 15, 1, 1),
AC97_SINGLE("Aux to Headphone Volume", AC97_CD, 12, 7, 1),
AC97_SINGLE("Aux to Side Tone Switch", AC97_CD, 11, 1, 1),
AC97_SINGLE("Aux to Side Tone Volume", AC97_CD, 8, 7, 1),
AC97_SINGLE("Aux to Phone Switch", AC97_CD, 7, 1, 1),
AC97_SINGLE("Aux to Phone Volume", AC97_CD, 4, 7, 1),
AC97_SINGLE("Phone to Headphone Switch", AC97_PHONE, 15, 1, 1),
AC97_SINGLE("Phone to Master Switch", AC97_PHONE, 14, 1, 1),
AC97_SINGLE("Line to Headphone Switch", AC97_LINE, 15, 1, 1),
AC97_SINGLE("Line to Master Switch", AC97_LINE, 14, 1, 1),
AC97_SINGLE("Line to Phone Switch", AC97_LINE, 13, 1, 1),
AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PCM, 15, 1, 1),
AC97_SINGLE("PCM Playback to Master Switch", AC97_PCM, 14, 1, 1),
AC97_SINGLE("PCM Playback to Phone Switch", AC97_PCM, 13, 1, 1),
AC97_SINGLE("Capture 20dB Boost Switch", AC97_REC_SEL, 14, 1, 0),
AC97_ENUM("Capture to Phone Mux", wm9711_enum[4]),
AC97_SINGLE("Capture to Phone 20dB Boost Switch", AC97_REC_SEL, 11, 1, 1),
AC97_ENUM("Capture Select", wm9711_enum[8]),
AC97_SINGLE("3D Upper Cut-off Switch", AC97_3D_CONTROL, 5, 1, 1),
AC97_SINGLE("3D Lower Cut-off Switch", AC97_3D_CONTROL, 4, 1, 1),
AC97_ENUM("Bass Control", wm9711_enum[5]),
AC97_SINGLE("Bass Cut-off Switch", AC97_MASTER_TONE, 12, 1, 1),
AC97_SINGLE("Tone Cut-off Switch", AC97_MASTER_TONE, 4, 1, 1),
AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
AC97_SINGLE("ADC Switch", AC97_REC_GAIN, 15, 1, 1),
AC97_ENUM("Capture Volume Steps", wm9711_enum[6]),
AC97_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
AC97_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
AC97_SINGLE("Mic 1 to Phone Switch", AC97_MIC, 14, 1, 1),
AC97_SINGLE("Mic 2 to Phone Switch", AC97_MIC, 13, 1, 1),
AC97_ENUM("Mic Select Source", wm9711_enum[7]),
AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 31, 1),
AC97_SINGLE("Mic 2 Volume", AC97_MIC, 0, 31, 1),
AC97_SINGLE("Mic 20dB Boost Switch", AC97_MIC, 7, 1, 0),
AC97_SINGLE("Master Left Inv Switch", AC97_MASTER, 6, 1, 0),
AC97_SINGLE("Master ZC Switch", AC97_MASTER, 7, 1, 0),
AC97_SINGLE("Headphone ZC Switch", AC97_HEADPHONE, 7, 1, 0),
AC97_SINGLE("Mono ZC Switch", AC97_MASTER_MONO, 7, 1, 0),
};
static int patch_wolfson_wm9711_specific(struct snd_ac97 * ac97)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(wm9711_snd_ac97_controls); i++) {
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9711_snd_ac97_controls[i], ac97))) < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_CODEC_CLASS_REV, 0x0808);
snd_ac97_write_cache(ac97, AC97_PCI_SVID, 0x0808);
snd_ac97_write_cache(ac97, AC97_VIDEO, 0x0808);
snd_ac97_write_cache(ac97, AC97_AUX, 0x0808);
snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x0808);
snd_ac97_write_cache(ac97, AC97_CD, 0x0000);
return 0;
}
static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
.build_specific = patch_wolfson_wm9711_specific,
};
static int patch_wolfson11(struct snd_ac97 * ac97)
{
/* WM9711, WM9712 */
ac97->build_ops = &patch_wolfson_wm9711_ops;
ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_MIC |
AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD;
return 0;
}
static const char* wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
static const char* wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
static const char* wm9713_rec_src[] =
{"Mic 1", "Mic 2", "Line", "Mono In", "Headphone Mix", "Master Mix",
"Mono Mix", "Zh"};
static const char* wm9713_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
static const char* wm9713_alc_select[] = {"None", "Left", "Right", "Stereo"};
static const char* wm9713_mono_pga[] = {"Vmid", "Zh", "Mono Mix", "Inv 1"};
static const char* wm9713_spk_pga[] =
{"Vmid", "Zh", "Headphone Mix", "Master Mix", "Inv", "NC", "NC", "NC"};
static const char* wm9713_hp_pga[] = {"Vmid", "Zh", "Headphone Mix", "NC"};
static const char* wm9713_out3_pga[] = {"Vmid", "Zh", "Inv 1", "NC"};
static const char* wm9713_out4_pga[] = {"Vmid", "Zh", "Inv 2", "NC"};
static const char* wm9713_dac_inv[] =
{"Off", "Mono Mix", "Master Mix", "Headphone Mix L", "Headphone Mix R",
"Headphone Mix Mono", "NC", "Vmid"};
static const char* wm9713_base[] = {"Linear Control", "Adaptive Boost"};
static const char* wm9713_ng_type[] = {"Constant Gain", "Mute"};
static const struct ac97_enum wm9713_enum[] = {
AC97_ENUM_SINGLE(AC97_LINE, 3, 4, wm9713_mic_mixer),
AC97_ENUM_SINGLE(AC97_VIDEO, 14, 4, wm9713_rec_mux),
AC97_ENUM_SINGLE(AC97_VIDEO, 9, 4, wm9713_rec_mux),
AC97_ENUM_DOUBLE(AC97_VIDEO, 3, 0, 8, wm9713_rec_src),
AC97_ENUM_DOUBLE(AC97_CD, 14, 6, 2, wm9713_rec_gain),
AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9713_alc_select),
AC97_ENUM_SINGLE(AC97_REC_GAIN, 14, 4, wm9713_mono_pga),
AC97_ENUM_DOUBLE(AC97_REC_GAIN, 11, 8, 8, wm9713_spk_pga),
AC97_ENUM_DOUBLE(AC97_REC_GAIN, 6, 4, 4, wm9713_hp_pga),
AC97_ENUM_SINGLE(AC97_REC_GAIN, 2, 4, wm9713_out3_pga),
AC97_ENUM_SINGLE(AC97_REC_GAIN, 0, 4, wm9713_out4_pga),
AC97_ENUM_DOUBLE(AC97_REC_GAIN_MIC, 13, 10, 8, wm9713_dac_inv),
AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_base),
AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type),
};
static const struct snd_kcontrol_new wm13_snd_ac97_controls[] = {
AC97_DOUBLE("Line In Volume", AC97_PC_BEEP, 8, 0, 31, 1),
AC97_SINGLE("Line In to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
AC97_SINGLE("Line In to Master Switch", AC97_PC_BEEP, 14, 1, 1),
AC97_SINGLE("Line In to Mono Switch", AC97_PC_BEEP, 13, 1, 1),
AC97_DOUBLE("PCM Playback Volume", AC97_PHONE, 8, 0, 31, 1),
AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PHONE, 15, 1, 1),
AC97_SINGLE("PCM Playback to Master Switch", AC97_PHONE, 14, 1, 1),
AC97_SINGLE("PCM Playback to Mono Switch", AC97_PHONE, 13, 1, 1),
AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 31, 1),
AC97_SINGLE("Mic 2 Volume", AC97_MIC, 0, 31, 1),
AC97_SINGLE("Mic 1 to Mono Switch", AC97_LINE, 7, 1, 1),
AC97_SINGLE("Mic 2 to Mono Switch", AC97_LINE, 6, 1, 1),
AC97_SINGLE("Mic Boost (+20dB) Switch", AC97_LINE, 5, 1, 0),
AC97_ENUM("Mic to Headphone Mux", wm9713_enum[0]),
AC97_SINGLE("Mic Headphone Mixer Volume", AC97_LINE, 0, 7, 1),
AC97_SINGLE("Capture Switch", AC97_CD, 15, 1, 1),
AC97_ENUM("Capture Volume Steps", wm9713_enum[4]),
AC97_DOUBLE("Capture Volume", AC97_CD, 8, 0, 15, 0),
AC97_SINGLE("Capture ZC Switch", AC97_CD, 7, 1, 0),
AC97_ENUM("Capture to Headphone Mux", wm9713_enum[1]),
AC97_SINGLE("Capture to Headphone Volume", AC97_VIDEO, 11, 7, 1),
AC97_ENUM("Capture to Mono Mux", wm9713_enum[2]),
AC97_SINGLE("Capture to Mono Boost (+20dB) Switch", AC97_VIDEO, 8, 1, 0),
AC97_SINGLE("Capture ADC Boost (+20dB) Switch", AC97_VIDEO, 6, 1, 0),
AC97_ENUM("Capture Select", wm9713_enum[3]),
AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
AC97_SINGLE("ALC Decay Time ", AC97_CODEC_CLASS_REV, 4, 15, 0),
AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
AC97_ENUM("ALC Function", wm9713_enum[5]),
AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 0),
AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 0),
AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
AC97_ENUM("ALC NG Type", wm9713_enum[13]),
AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 0),
AC97_DOUBLE("Master ZC Switch", AC97_MASTER, 14, 6, 1, 0),
AC97_DOUBLE("Headphone ZC Switch", AC97_HEADPHONE, 14, 6, 1, 0),
AC97_DOUBLE("Out3/4 ZC Switch", AC97_MASTER_MONO, 14, 6, 1, 0),
AC97_SINGLE("Master Right Switch", AC97_MASTER, 7, 1, 1),
AC97_SINGLE("Headphone Right Switch", AC97_HEADPHONE, 7, 1, 1),
AC97_SINGLE("Out3/4 Right Switch", AC97_MASTER_MONO, 7, 1, 1),
AC97_SINGLE("Mono In to Headphone Switch", AC97_MASTER_TONE, 15, 1, 1),
AC97_SINGLE("Mono In to Master Switch", AC97_MASTER_TONE, 14, 1, 1),
AC97_SINGLE("Mono In Volume", AC97_MASTER_TONE, 8, 31, 1),
AC97_SINGLE("Mono Switch", AC97_MASTER_TONE, 7, 1, 1),
AC97_SINGLE("Mono ZC Switch", AC97_MASTER_TONE, 6, 1, 0),
AC97_SINGLE("Mono Volume", AC97_MASTER_TONE, 0, 31, 1),
AC97_SINGLE("Beep to Headphone Switch", AC97_AUX, 15, 1, 1),
AC97_SINGLE("Beep to Headphone Volume", AC97_AUX, 12, 7, 1),
AC97_SINGLE("Beep to Master Switch", AC97_AUX, 11, 1, 1),
AC97_SINGLE("Beep to Master Volume", AC97_AUX, 8, 7, 1),
AC97_SINGLE("Beep to Mono Switch", AC97_AUX, 7, 1, 1),
AC97_SINGLE("Beep to Mono Volume", AC97_AUX, 4, 7, 1),
AC97_SINGLE("Voice to Headphone Switch", AC97_PCM, 15, 1, 1),
AC97_SINGLE("Voice to Headphone Volume", AC97_PCM, 12, 7, 1),
AC97_SINGLE("Voice to Master Switch", AC97_PCM, 11, 1, 1),
AC97_SINGLE("Voice to Master Volume", AC97_PCM, 8, 7, 1),
AC97_SINGLE("Voice to Mono Switch", AC97_PCM, 7, 1, 1),
AC97_SINGLE("Voice to Mono Volume", AC97_PCM, 4, 7, 1),
AC97_SINGLE("Aux to Headphone Switch", AC97_REC_SEL, 15, 1, 1),
AC97_SINGLE("Aux to Headphone Volume", AC97_REC_SEL, 12, 7, 1),
AC97_SINGLE("Aux to Master Switch", AC97_REC_SEL, 11, 1, 1),
AC97_SINGLE("Aux to Master Volume", AC97_REC_SEL, 8, 7, 1),
AC97_SINGLE("Aux to Mono Switch", AC97_REC_SEL, 7, 1, 1),
AC97_SINGLE("Aux to Mono Volume", AC97_REC_SEL, 4, 7, 1),
AC97_ENUM("Mono Input Mux", wm9713_enum[6]),
AC97_ENUM("Master Input Mux", wm9713_enum[7]),
AC97_ENUM("Headphone Input Mux", wm9713_enum[8]),
AC97_ENUM("Out 3 Input Mux", wm9713_enum[9]),
AC97_ENUM("Out 4 Input Mux", wm9713_enum[10]),
AC97_ENUM("Bass Control", wm9713_enum[12]),
AC97_SINGLE("Bass Cut-off Switch", AC97_GENERAL_PURPOSE, 12, 1, 1),
AC97_SINGLE("Tone Cut-off Switch", AC97_GENERAL_PURPOSE, 4, 1, 1),
AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_GENERAL_PURPOSE, 6, 1, 0),
AC97_SINGLE("Bass Volume", AC97_GENERAL_PURPOSE, 8, 15, 1),
AC97_SINGLE("Tone Volume", AC97_GENERAL_PURPOSE, 0, 15, 1),
};
static const struct snd_kcontrol_new wm13_snd_ac97_controls_3d[] = {
AC97_ENUM("Inv Input Mux", wm9713_enum[11]),
AC97_SINGLE("3D Upper Cut-off Switch", AC97_REC_GAIN_MIC, 5, 1, 0),
AC97_SINGLE("3D Lower Cut-off Switch", AC97_REC_GAIN_MIC, 4, 1, 0),
AC97_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
};
static int patch_wolfson_wm9713_3d (struct snd_ac97 * ac97)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_3d); i++) {
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_3d[i], ac97))) < 0)
return err;
}
return 0;
}
static int patch_wolfson_wm9713_specific(struct snd_ac97 * ac97)
{
int err, i;
for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls); i++) {
if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls[i], ac97))) < 0)
return err;
}
snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x0808);
snd_ac97_write_cache(ac97, AC97_PHONE, 0x0808);
snd_ac97_write_cache(ac97, AC97_MIC, 0x0808);
snd_ac97_write_cache(ac97, AC97_LINE, 0x00da);
snd_ac97_write_cache(ac97, AC97_CD, 0x0808);
snd_ac97_write_cache(ac97, AC97_VIDEO, 0xd612);
snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x1ba0);
return 0;
}
#ifdef CONFIG_PM
static void patch_wolfson_wm9713_suspend (struct snd_ac97 * ac97)
{
snd_ac97_write_cache(ac97, AC97_EXTENDED_MID, 0xfeff);
snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0xffff);
}
static void patch_wolfson_wm9713_resume (struct snd_ac97 * ac97)
{
snd_ac97_write_cache(ac97, AC97_EXTENDED_MID, 0xda00);
snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0x3810);
snd_ac97_write_cache(ac97, AC97_POWERDOWN, 0x0);
}
#endif
static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
.build_specific = patch_wolfson_wm9713_specific,
.build_3d = patch_wolfson_wm9713_3d,
#ifdef CONFIG_PM
.suspend = patch_wolfson_wm9713_suspend,
.resume = patch_wolfson_wm9713_resume
#endif
};
static int patch_wolfson13(struct snd_ac97 * ac97)
{
/* WM9713, WM9714 */
ac97->build_ops = &patch_wolfson_wm9713_ops;
ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_PHONE |
AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD | AC97_HAS_NO_TONE |
AC97_HAS_NO_STD_PCM;
ac97->scaps &= ~AC97_SCAP_MODEM;
snd_ac97_write_cache(ac97, AC97_EXTENDED_MID, 0xda00);
snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0x3810);
snd_ac97_write_cache(ac97, AC97_POWERDOWN, 0x0);
return 0;
}
/*
* Tritech codec
*/
static int patch_tritech_tr28028(struct snd_ac97 * ac97)
{
snd_ac97_write_cache(ac97, 0x26, 0x0300);
snd_ac97_write_cache(ac97, 0x26, 0x0000);
snd_ac97_write_cache(ac97, AC97_SURROUND_MASTER, 0x0000);
snd_ac97_write_cache(ac97, AC97_SPDIF, 0x0000);
return 0;
}
/*
* Sigmatel STAC97xx codecs
*/
static int patch_sigmatel_stac9700_3d(struct snd_ac97 * ac97)
{
struct snd_kcontrol *kctl;
int err;
if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 2, 3, 0);
snd_ac97_write_cache(ac97, AC97_3D_CONTROL, 0x0000);
return 0;
}
static int patch_sigmatel_stac9708_3d(struct snd_ac97 * ac97)
{
struct snd_kcontrol *kctl;
int err;
if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 0, 3, 0);
if ((err = snd_ctl_add(ac97->bus->card, kctl = snd_ac97_cnew(&snd_ac97_controls_3d[0], ac97))) < 0)
return err;
strcpy(kctl->id.name, "3D Control Sigmatel - Rear Depth");
kctl->private_value = AC97_SINGLE_VALUE(AC97_3D_CONTROL, 2, 3, 0);
snd_ac97_write_cache(ac97, AC97_3D_CONTROL, 0x0000);
return 0;
}
static const struct snd_kcontrol_new snd_ac97_sigmatel_4speaker =
AC97_SINGLE("Sigmatel 4-Speaker Stereo Playback Switch",
AC97_SIGMATEL_DAC2INVERT, 2, 1, 0);
/* "Sigmatel " removed due to excessive name length: */
static const struct snd_kcontrol_new snd_ac97_sigmatel_phaseinvert =
AC97_SINGLE("Surround Phase Inversion Playback Switch",
AC97_SIGMATEL_DAC2INVERT, 3, 1, 0);
static const struct snd_kcontrol_new snd_ac97_sigmatel_controls[] = {
AC97_SINGLE("Sigmatel DAC 6dB Attenuate", AC97_SIGMATEL_ANALOG, 1, 1, 0),
AC97_SINGLE("Sigmatel ADC 6dB Attenuate", AC97_SIGMATEL_ANALOG, 0, 1, 0)
};
static int patch_sigmatel_stac97xx_specific(struct snd_ac97 * ac97)
{
int err;
snd_ac97_write_cache(ac97, AC97_SIGMATEL_ANALOG, snd_ac97_read(ac97, AC97_SIGMATEL_ANALOG) & ~0x0003);
if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 1))
if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[0], 1)) < 0)
return err;
if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_ANALOG, 0))
if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_controls[1], 1)) < 0)
return err;
if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 2))
if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_4speaker, 1)) < 0)
return err;
if (snd_ac97_try_bit(ac97, AC97_SIGMATEL_DAC2INVERT, 3))
if ((err = patch_build_controls(ac97, &snd_ac97_sigmatel_phaseinvert, 1)) < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
.build_3d = patch_sigmatel_stac9700_3d,
.build_specific = patch_sigmatel_stac97xx_specific
};
static int patch_sigmatel_stac9700(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_sigmatel_stac9700_ops;
return 0;
}
static int snd_ac97_stac9708_put_bias(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int err;
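/*
 * The 0xabba write to BIAS1 appears to act as a vendor unlock key: unlock,
 * update the bias bit in BIAS2, then relock by writing 0.
 */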
mutex_lock(&ac97->page_mutex);
snd_ac97_write(ac97, AC97_SIGMATEL_BIAS1, 0xabba);
err = snd_ac97_update_bits(ac97, AC97_SIGMATEL_BIAS2, 0x0010,
(ucontrol->value.integer.value[0] & 1) << 4);
snd_ac97_write(ac97, AC97_SIGMATEL_BIAS1, 0);
mutex_unlock(&ac97->page_mutex);
return err;
}
static const struct snd_kcontrol_new snd_ac97_stac9708_bias_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Sigmatel Output Bias Switch",
.info = snd_ac97_info_volsw,
.get = snd_ac97_get_volsw,
.put = snd_ac97_stac9708_put_bias,
.private_value = AC97_SINGLE_VALUE(AC97_SIGMATEL_BIAS2, 4, 1, 0),
};
static int patch_sigmatel_stac9708_specific(struct snd_ac97 *ac97)
{
int err;
/* the register bit is writable, but the function is not implemented: */
snd_ac97_remove_ctl(ac97, "PCM Out Path & Mute", NULL);
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Sigmatel Surround Playback");
if ((err = patch_build_controls(ac97, &snd_ac97_stac9708_bias_control, 1)) < 0)
return err;
return patch_sigmatel_stac97xx_specific(ac97);
}
static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
.build_3d = patch_sigmatel_stac9708_3d,
.build_specific = patch_sigmatel_stac9708_specific
};
static int patch_sigmatel_stac9708(struct snd_ac97 * ac97)
{
unsigned int codec72, codec6c;
ac97->build_ops = &patch_sigmatel_stac9708_ops;
ac97->caps |= 0x10; /* HP (sigmatel surround) support */
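/*
 * The two vendor registers read below appear to identify the silicon
 * revision; each revision gets a slightly different init sequence.
 */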
codec72 = snd_ac97_read(ac97, AC97_SIGMATEL_BIAS2) & 0x8000;
codec6c = snd_ac97_read(ac97, AC97_SIGMATEL_ANALOG);
if ((codec72==0) && (codec6c==0)) {
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC2, 0x1000);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS2, 0x0007);
} else if ((codec72==0x8000) && (codec6c==0)) {
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC2, 0x1001);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_DAC2INVERT, 0x0008);
} else if ((codec72==0x8000) && (codec6c==0x0080)) {
/* nothing */
}
snd_ac97_write_cache(ac97, AC97_SIGMATEL_MULTICHN, 0x0000);
return 0;
}
static int patch_sigmatel_stac9721(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_sigmatel_stac9700_ops;
if (snd_ac97_read(ac97, AC97_SIGMATEL_ANALOG) == 0) {
// patch for SigmaTel
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC2, 0x4000);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS2, 0x0002);
}
snd_ac97_write_cache(ac97, AC97_SIGMATEL_MULTICHN, 0x0000);
return 0;
}
static int patch_sigmatel_stac9744(struct snd_ac97 * ac97)
{
// patch for SigmaTel
ac97->build_ops = &patch_sigmatel_stac9700_ops;
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC2, 0x0000); /* is this correct? --jk */
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS2, 0x0002);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_MULTICHN, 0x0000);
return 0;
}
static int patch_sigmatel_stac9756(struct snd_ac97 * ac97)
{
// patch for SigmaTel
ac97->build_ops = &patch_sigmatel_stac9700_ops;
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_CIC2, 0x0000); /* is this correct? --jk */
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS1, 0xabba);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_BIAS2, 0x0002);
snd_ac97_write_cache(ac97, AC97_SIGMATEL_MULTICHN, 0x0000);
return 0;
}
static int snd_ac97_stac9758_output_jack_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[5] = { "Input/Disabled", "Front Output",
"Rear Output", "Center/LFE Output", "Mixer Output" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 5;
if (uinfo->value.enumerated.item > 4)
uinfo->value.enumerated.item = 4;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
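/*
 * Output jack routing: each jack uses a 3-bit field in AC97_SIGMATEL_OUTSEL.
 * Bit 2 of the field enables the pin as an output and the low two bits select
 * the source, so field value 0 maps to "Input/Disabled" and 4..7 map to the
 * four output sources.
 */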
static int snd_ac97_stac9758_output_jack_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int shift = kcontrol->private_value;
unsigned short val;
val = ac97->regs[AC97_SIGMATEL_OUTSEL] >> shift;
if (!(val & 4))
ucontrol->value.enumerated.item[0] = 0;
else
ucontrol->value.enumerated.item[0] = 1 + (val & 3);
return 0;
}
static int snd_ac97_stac9758_output_jack_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int shift = kcontrol->private_value;
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 4)
return -EINVAL;
if (ucontrol->value.enumerated.item[0] == 0)
val = 0;
else
val = 4 | (ucontrol->value.enumerated.item[0] - 1);
return ac97_update_bits_page(ac97, AC97_SIGMATEL_OUTSEL,
7 << shift, val << shift, 0);
}
static int snd_ac97_stac9758_input_jack_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[7] = { "Mic2 Jack", "Mic1 Jack", "Line In Jack",
"Front Jack", "Rear Jack", "Center/LFE Jack", "Mute" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 7;
if (uinfo->value.enumerated.item > 6)
uinfo->value.enumerated.item = 6;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_stac9758_input_jack_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int shift = kcontrol->private_value;
unsigned short val;
val = ac97->regs[AC97_SIGMATEL_INSEL];
ucontrol->value.enumerated.item[0] = (val >> shift) & 7;
return 0;
}
static int snd_ac97_stac9758_input_jack_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int shift = kcontrol->private_value;
return ac97_update_bits_page(ac97, AC97_SIGMATEL_INSEL, 7 << shift,
ucontrol->value.enumerated.item[0] << shift, 0);
}
static int snd_ac97_stac9758_phonesel_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[3] = { "None", "Front Jack", "Rear Jack" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 3;
if (uinfo->value.enumerated.item > 2)
uinfo->value.enumerated.item = 2;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_stac9758_phonesel_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
ucontrol->value.enumerated.item[0] = ac97->regs[AC97_SIGMATEL_IOMISC] & 3;
return 0;
}
static int snd_ac97_stac9758_phonesel_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
return ac97_update_bits_page(ac97, AC97_SIGMATEL_IOMISC, 3,
ucontrol->value.enumerated.item[0], 0);
}
#define STAC9758_OUTPUT_JACK(xname, shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_ac97_stac9758_output_jack_info, \
.get = snd_ac97_stac9758_output_jack_get, \
.put = snd_ac97_stac9758_output_jack_put, \
.private_value = shift }
#define STAC9758_INPUT_JACK(xname, shift) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_ac97_stac9758_input_jack_info, \
.get = snd_ac97_stac9758_input_jack_get, \
.put = snd_ac97_stac9758_input_jack_put, \
.private_value = shift }
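/*
 * private_value in the jack controls below is the bit shift of the routing
 * field inside AC97_SIGMATEL_OUTSEL (outputs) or AC97_SIGMATEL_INSEL (inputs).
 */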
static const struct snd_kcontrol_new snd_ac97_sigmatel_stac9758_controls[] = {
STAC9758_OUTPUT_JACK("Mic1 Jack", 1),
STAC9758_OUTPUT_JACK("LineIn Jack", 4),
STAC9758_OUTPUT_JACK("Front Jack", 7),
STAC9758_OUTPUT_JACK("Rear Jack", 10),
STAC9758_OUTPUT_JACK("Center/LFE Jack", 13),
STAC9758_INPUT_JACK("Mic Input Source", 0),
STAC9758_INPUT_JACK("Line Input Source", 8),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Amp",
.info = snd_ac97_stac9758_phonesel_info,
.get = snd_ac97_stac9758_phonesel_get,
.put = snd_ac97_stac9758_phonesel_put
},
AC97_SINGLE("Exchange Center/LFE", AC97_SIGMATEL_IOMISC, 4, 1, 0),
AC97_SINGLE("Headphone +3dB Boost", AC97_SIGMATEL_IOMISC, 8, 1, 0)
};
static int patch_sigmatel_stac9758_specific(struct snd_ac97 *ac97)
{
int err;
err = patch_sigmatel_stac97xx_specific(ac97);
if (err < 0)
return err;
err = patch_build_controls(ac97, snd_ac97_sigmatel_stac9758_controls,
ARRAY_SIZE(snd_ac97_sigmatel_stac9758_controls));
if (err < 0)
return err;
/* DAC-A direct */
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Front Playback");
/* DAC-A to Mix = PCM */
/* DAC-B direct = Surround */
/* DAC-B to Mix */
snd_ac97_rename_vol_ctl(ac97, "Video Playback", "Surround Mix Playback");
/* DAC-C direct = Center/LFE */
return 0;
}
static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
.build_3d = patch_sigmatel_stac9700_3d,
.build_specific = patch_sigmatel_stac9758_specific
};
static int patch_sigmatel_stac9758(struct snd_ac97 * ac97)
{
static unsigned short regs[4] = {
AC97_SIGMATEL_OUTSEL,
AC97_SIGMATEL_IOMISC,
AC97_SIGMATEL_INSEL,
AC97_SIGMATEL_VARIOUS
};
static unsigned short def_regs[4] = {
/* OUTSEL */ 0xd794, /* CL:CL, SR:SR, LO:MX, LI:DS, MI:DS */
/* IOMISC */ 0x2001,
/* INSEL */ 0x0201, /* LI:LI, MI:M1 */
/* VARIOUS */ 0x0040
};
static unsigned short m675_regs[4] = {
/* OUTSEL */ 0xfc70, /* CL:MX, SR:MX, LO:DS, LI:MX, MI:DS */
/* IOMISC */ 0x2102, /* HP amp on */
/* INSEL */ 0x0203, /* LI:LI, MI:FR */
/* VARIOUS */ 0x0041 /* stereo mic */
};
unsigned short *pregs = def_regs;
int i;
/* Gateway M675 notebook */
if (ac97->pci &&
ac97->subsystem_vendor == 0x107b &&
ac97->subsystem_device == 0x0601)
pregs = m675_regs;
// patch for SigmaTel
ac97->build_ops = &patch_sigmatel_stac9758_ops;
/* FIXME: assume only page 0 for writing cache */
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, AC97_PAGE_VENDOR);
for (i = 0; i < 4; i++)
snd_ac97_write_cache(ac97, regs[i], pregs[i]);
ac97->flags |= AC97_STEREO_MUTES;
return 0;
}
/*
* Cirrus Logic CS42xx codecs
*/
static const struct snd_kcontrol_new snd_ac97_cirrus_controls_spdif[2] = {
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), AC97_CSR_SPDIF, 15, 1, 0),
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "AC97-SPSA", AC97_CSR_ACMODE, 0, 3, 0)
};
static int patch_cirrus_build_spdif(struct snd_ac97 * ac97)
{
int err;
/* con mask, pro mask, default */
if ((err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3)) < 0)
return err;
/* switch, spsa */
if ((err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[0], 1)) < 0)
return err;
switch (ac97->id & AC97_ID_CS_MASK) {
case AC97_ID_CS4205:
if ((err = patch_build_controls(ac97, &snd_ac97_cirrus_controls_spdif[1], 1)) < 0)
return err;
break;
}
/* set default PCM S/PDIF params */
/* consumer,PCM audio,no copyright,no preemphasis,PCM coder,original,48000Hz */
snd_ac97_write_cache(ac97, AC97_CSR_SPDIF, 0x0a20);
return 0;
}
static const struct snd_ac97_build_ops patch_cirrus_ops = {
.build_spdif = patch_cirrus_build_spdif
};
static int patch_cirrus_spdif(struct snd_ac97 * ac97)
{
/* Basically, the CS4201/CS4205/CS4297A have non-standard S/PDIF registers.
   WHY CAN'T ANYONE FOLLOW THE BLOODY SPEC?  *sigh*
   - the S/PDIF EA ID bit is not set, but S/PDIF is always present.
   - enable/disable is S/PDIF register bit 15.
   - the S/PDIF control register is 0x68 and differs from AC'97:
     - valid is bit 14 (vs 15)
     - no DRS
     - only 44.1/48 kHz [00 = 48, 01 = 44.1] (AC'97 is 00 = 44.1, 10 = 48)
   - S/PDIF source select is in 0x5e, bits 0 and 1.
*/
ac97->build_ops = &patch_cirrus_ops;
ac97->flags |= AC97_CS_SPDIF;
ac97->rates[AC97_RATES_SPDIF] &= ~SNDRV_PCM_RATE_32000;
ac97->ext_id |= AC97_EI_SPDIF; /* force the detection of spdif */
snd_ac97_write_cache(ac97, AC97_CSR_ACMODE, 0x0080);
return 0;
}
static int patch_cirrus_cs4299(struct snd_ac97 * ac97)
{
/* force the detection of PC Beep */
ac97->flags |= AC97_HAS_PC_BEEP;
return patch_cirrus_spdif(ac97);
}
/*
* Conexant codecs
*/
static const struct snd_kcontrol_new snd_ac97_conexant_controls_spdif[1] = {
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), AC97_CXR_AUDIO_MISC, 3, 1, 0),
};
static int patch_conexant_build_spdif(struct snd_ac97 * ac97)
{
int err;
/* con mask, pro mask, default */
if ((err = patch_build_controls(ac97, &snd_ac97_controls_spdif[0], 3)) < 0)
return err;
/* switch */
if ((err = patch_build_controls(ac97, &snd_ac97_conexant_controls_spdif[0], 1)) < 0)
return err;
/* set default PCM S/PDIF params */
/* consumer,PCM audio,no copyright,no preemphasis,PCM coder,original,48000Hz */
snd_ac97_write_cache(ac97, AC97_CXR_AUDIO_MISC,
snd_ac97_read(ac97, AC97_CXR_AUDIO_MISC) & ~(AC97_CXR_SPDIFEN|AC97_CXR_COPYRGT|AC97_CXR_SPDIF_MASK));
return 0;
}
static const struct snd_ac97_build_ops patch_conexant_ops = {
.build_spdif = patch_conexant_build_spdif
};
static int patch_conexant(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_conexant_ops;
ac97->flags |= AC97_CX_SPDIF;
ac97->ext_id |= AC97_EI_SPDIF; /* force the detection of spdif */
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000; /* 48k only */
return 0;
}
static int patch_cx20551(struct snd_ac97 *ac97)
{
snd_ac97_update_bits(ac97, 0x5c, 0x01, 0x01);
return 0;
}
/*
* Analog Device AD18xx, AD19xx codecs
*/
#ifdef CONFIG_PM
static void ad18xx_resume(struct snd_ac97 *ac97)
{
static unsigned short setup_regs[] = {
AC97_AD_MISC, AC97_AD_SERIAL_CFG, AC97_AD_JACK_SPDIF,
};
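/*
 * Restore the AD-specific setup registers first: AC97_AD_SERIAL_CFG in
 * particular controls per-codec addressing, which the multi-codec restore
 * below depends on.
 */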
int i, codec;
for (i = 0; i < (int)ARRAY_SIZE(setup_regs); i++) {
unsigned short reg = setup_regs[i];
if (test_bit(reg, ac97->reg_accessed)) {
snd_ac97_write(ac97, reg, ac97->regs[reg]);
snd_ac97_read(ac97, reg);
}
}
if (! (ac97->flags & AC97_AD_MULTI))
/* normal restore */
snd_ac97_restore_status(ac97);
else {
/* restore the AD18xx codec configurations */
for (codec = 0; codec < 3; codec++) {
if (! ac97->spec.ad18xx.id[codec])
continue;
/* select single codec */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000,
ac97->spec.ad18xx.unchained[codec] | ac97->spec.ad18xx.chained[codec]);
ac97->bus->ops->write(ac97, AC97_AD_CODEC_CFG, ac97->spec.ad18xx.codec_cfg[codec]);
}
/* select all codecs */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000, 0x7000);
/* restore status */
for (i = 2; i < 0x7c ; i += 2) {
if (i == AC97_POWERDOWN || i == AC97_EXTENDED_ID)
continue;
if (test_bit(i, ac97->reg_accessed)) {
/* handle multi codecs for AD18xx */
if (i == AC97_PCM) {
for (codec = 0; codec < 3; codec++) {
if (! ac97->spec.ad18xx.id[codec])
continue;
/* select single codec */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000,
ac97->spec.ad18xx.unchained[codec] | ac97->spec.ad18xx.chained[codec]);
/* update PCM bits */
ac97->bus->ops->write(ac97, AC97_PCM, ac97->spec.ad18xx.pcmreg[codec]);
}
/* select all codecs */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000, 0x7000);
continue;
} else if (i == AC97_AD_TEST ||
i == AC97_AD_CODEC_CFG ||
i == AC97_AD_SERIAL_CFG)
continue; /* ignore */
}
snd_ac97_write(ac97, i, ac97->regs[i]);
snd_ac97_read(ac97, i);
}
}
snd_ac97_restore_iec958(ac97);
}
static void ad1888_resume(struct snd_ac97 *ac97)
{
ad18xx_resume(ac97);
snd_ac97_write_cache(ac97, AC97_CODEC_CLASS_REV, 0x8080);
}
#endif
static const struct snd_ac97_res_table ad1819_restbl[] = {
{ AC97_PHONE, 0x9f1f },
{ AC97_MIC, 0x9f1f },
{ AC97_LINE, 0x9f1f },
{ AC97_CD, 0x9f1f },
{ AC97_VIDEO, 0x9f1f },
{ AC97_AUX, 0x9f1f },
{ AC97_PCM, 0x9f1f },
{ } /* terminator */
};
static int patch_ad1819(struct snd_ac97 * ac97)
{
unsigned short scfg;
// patch for Analog Devices
scfg = snd_ac97_read(ac97, AC97_AD_SERIAL_CFG);
snd_ac97_write_cache(ac97, AC97_AD_SERIAL_CFG, scfg | 0x7000); /* select all codecs */
ac97->res_table = ad1819_restbl;
return 0;
}
static unsigned short patch_ad1881_unchained(struct snd_ac97 * ac97, int idx, unsigned short mask)
{
unsigned short val;
// test for unchained codec
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000, mask);
snd_ac97_write_cache(ac97, AC97_AD_CODEC_CFG, 0x0000); /* ID0C, ID1C, SDIE = off */
val = snd_ac97_read(ac97, AC97_VENDOR_ID2);
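/* the read must match the Analog Devices family signature (0x5340 under the
 * 0xff40 mask), otherwise no codec answered on this chain-select mask */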
if ((val & 0xff40) != 0x5340)
return 0;
ac97->spec.ad18xx.unchained[idx] = mask;
ac97->spec.ad18xx.id[idx] = val;
ac97->spec.ad18xx.codec_cfg[idx] = 0x0000;
return mask;
}
static int patch_ad1881_chained1(struct snd_ac97 * ac97, int idx, unsigned short codec_bits)
{
static int cfg_bits[3] = { 1<<12, 1<<14, 1<<13 };
unsigned short val;
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000, cfg_bits[idx]);
snd_ac97_write_cache(ac97, AC97_AD_CODEC_CFG, 0x0004); // SDIE
val = snd_ac97_read(ac97, AC97_VENDOR_ID2);
if ((val & 0xff40) != 0x5340)
return 0;
if (codec_bits)
snd_ac97_write_cache(ac97, AC97_AD_CODEC_CFG, codec_bits);
ac97->spec.ad18xx.chained[idx] = cfg_bits[idx];
ac97->spec.ad18xx.id[idx] = val;
ac97->spec.ad18xx.codec_cfg[idx] = codec_bits ? codec_bits : 0x0004;
return 1;
}
static void patch_ad1881_chained(struct snd_ac97 * ac97, int unchained_idx, int cidx1, int cidx2)
{
// already detected?
if (ac97->spec.ad18xx.unchained[cidx1] || ac97->spec.ad18xx.chained[cidx1])
cidx1 = -1;
if (ac97->spec.ad18xx.unchained[cidx2] || ac97->spec.ad18xx.chained[cidx2])
cidx2 = -1;
if (cidx1 < 0 && cidx2 < 0)
return;
// test for chained codecs
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000,
ac97->spec.ad18xx.unchained[unchained_idx]);
snd_ac97_write_cache(ac97, AC97_AD_CODEC_CFG, 0x0002); // ID1C
ac97->spec.ad18xx.codec_cfg[unchained_idx] = 0x0002;
if (cidx1 >= 0) {
if (cidx2 < 0)
patch_ad1881_chained1(ac97, cidx1, 0);
else if (patch_ad1881_chained1(ac97, cidx1, 0x0006)) // SDIE | ID1C
patch_ad1881_chained1(ac97, cidx2, 0);
else if (patch_ad1881_chained1(ac97, cidx2, 0x0006)) // SDIE | ID1C
patch_ad1881_chained1(ac97, cidx1, 0);
} else if (cidx2 >= 0) {
patch_ad1881_chained1(ac97, cidx2, 0);
}
}
static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
#ifdef CONFIG_PM
.resume = ad18xx_resume
#endif
};
static int patch_ad1881(struct snd_ac97 * ac97)
{
static const char cfg_idxs[3][2] = {
{2, 1},
{0, 2},
{0, 1}
};
// patch for Analog Devices
unsigned short codecs[3];
unsigned short val;
int idx, num;
val = snd_ac97_read(ac97, AC97_AD_SERIAL_CFG);
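/* write the value back through the cache so that later update_bits() calls
 * on AC97_AD_SERIAL_CFG start from a known cached state */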
snd_ac97_write_cache(ac97, AC97_AD_SERIAL_CFG, val);
codecs[0] = patch_ad1881_unchained(ac97, 0, (1<<12));
codecs[1] = patch_ad1881_unchained(ac97, 1, (1<<14));
codecs[2] = patch_ad1881_unchained(ac97, 2, (1<<13));
if (! (codecs[0] || codecs[1] || codecs[2]))
goto __end;
for (idx = 0; idx < 3; idx++)
if (ac97->spec.ad18xx.unchained[idx])
patch_ad1881_chained(ac97, idx, cfg_idxs[idx][0], cfg_idxs[idx][1]);
if (ac97->spec.ad18xx.id[1]) {
ac97->flags |= AC97_AD_MULTI;
ac97->scaps |= AC97_SCAP_SURROUND_DAC;
}
if (ac97->spec.ad18xx.id[2]) {
ac97->flags |= AC97_AD_MULTI;
ac97->scaps |= AC97_SCAP_CENTER_LFE_DAC;
}
__end:
/* select all codecs */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x7000, 0x7000);
/* check if only one codec is present */
for (idx = num = 0; idx < 3; idx++)
if (ac97->spec.ad18xx.id[idx])
num++;
if (num == 1) {
/* ok, deselect all ID bits */
snd_ac97_write_cache(ac97, AC97_AD_CODEC_CFG, 0x0000);
ac97->spec.ad18xx.codec_cfg[0] =
ac97->spec.ad18xx.codec_cfg[1] =
ac97->spec.ad18xx.codec_cfg[2] = 0x0000;
}
/* required for AD1886/AD1885 combination */
ac97->ext_id = snd_ac97_read(ac97, AC97_EXTENDED_ID);
if (ac97->spec.ad18xx.id[0]) {
ac97->id &= 0xffff0000;
ac97->id |= ac97->spec.ad18xx.id[0];
}
ac97->build_ops = &patch_ad1881_build_ops;
return 0;
}
static const struct snd_kcontrol_new snd_ac97_controls_ad1885[] = {
AC97_SINGLE("Digital Mono Direct", AC97_AD_MISC, 11, 1, 0),
/* AC97_SINGLE("Digital Audio Mode", AC97_AD_MISC, 12, 1, 0), */ /* seems problematic */
AC97_SINGLE("Low Power Mixer", AC97_AD_MISC, 14, 1, 0),
AC97_SINGLE("Zero Fill DAC", AC97_AD_MISC, 15, 1, 0),
AC97_SINGLE("Headphone Jack Sense", AC97_AD_JACK_SPDIF, 9, 1, 1), /* inverted */
AC97_SINGLE("Line Jack Sense", AC97_AD_JACK_SPDIF, 8, 1, 1), /* inverted */
};
static const DECLARE_TLV_DB_SCALE(db_scale_6bit_6db_max, -8850, 150, 0);
static int patch_ad1885_specific(struct snd_ac97 * ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_controls_ad1885, ARRAY_SIZE(snd_ac97_controls_ad1885))) < 0)
return err;
reset_tlv(ac97, "Headphone Playback Volume",
db_scale_6bit_6db_max);
return 0;
}
static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
.build_specific = &patch_ad1885_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume
#endif
};
static int patch_ad1885(struct snd_ac97 * ac97)
{
patch_ad1881(ac97);
/* This is required to deal with the Intel D815EEAL2 */
/* i.e. Line out is actually headphone out from codec */
/* set default */
snd_ac97_write_cache(ac97, AC97_AD_MISC, 0x0404);
ac97->build_ops = &patch_ad1885_build_ops;
return 0;
}
static int patch_ad1886_specific(struct snd_ac97 * ac97)
{
reset_tlv(ac97, "Headphone Playback Volume",
db_scale_6bit_6db_max);
return 0;
}
static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
.build_specific = &patch_ad1886_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume
#endif
};
static int patch_ad1886(struct snd_ac97 * ac97)
{
patch_ad1881(ac97);
/* Presario 700 workaround for a Jack Sense/SPDIF register mis-setting */
snd_ac97_write_cache(ac97, AC97_AD_JACK_SPDIF, 0x0010);
ac97->build_ops = &patch_ad1886_build_ops;
return 0;
}
/* MISC bits (AD1888/AD1980/AD1985 register 0x76) */
#define AC97_AD198X_MBC 0x0003 /* mic boost */
#define AC97_AD198X_MBC_20 0x0000 /* +20dB */
#define AC97_AD198X_MBC_10 0x0001 /* +10dB */
#define AC97_AD198X_MBC_30 0x0002 /* +30dB */
#define AC97_AD198X_VREFD 0x0004 /* VREF high-Z */
#define AC97_AD198X_VREFH 0x0008 /* 0=2.25V, 1=3.7V */
#define AC97_AD198X_VREF_0 0x000c /* 0V (AD1985 only) */
#define AC97_AD198X_VREF_MASK (AC97_AD198X_VREFH | AC97_AD198X_VREFD)
#define AC97_AD198X_VREF_SHIFT 2
#define AC97_AD198X_SRU 0x0010 /* sample rate unlock */
#define AC97_AD198X_LOSEL 0x0020 /* LINE_OUT amplifiers input select */
#define AC97_AD198X_2MIC 0x0040 /* 2-channel mic select */
#define AC97_AD198X_SPRD 0x0080 /* SPREAD enable */
#define AC97_AD198X_DMIX0 0x0100 /* downmix mode: */
/* 0 = 6-to-4, 1 = 6-to-2 downmix */
#define AC97_AD198X_DMIX1 0x0200 /* downmix mode: 1 = enabled */
#define AC97_AD198X_HPSEL 0x0400 /* headphone amplifier input select */
#define AC97_AD198X_CLDIS 0x0800 /* center/lfe disable */
#define AC97_AD198X_LODIS 0x1000 /* LINE_OUT disable */
#define AC97_AD198X_MSPLT 0x2000 /* mute split */
#define AC97_AD198X_AC97NC 0x4000 /* AC'97 non-compatible mode */
#define AC97_AD198X_DACZ 0x8000 /* DAC zero-fill mode */
/* MISC 1 bits (AD1986 register 0x76) */
#define AC97_AD1986_MBC 0x0003 /* mic boost */
#define AC97_AD1986_MBC_20 0x0000 /* +20dB */
#define AC97_AD1986_MBC_10 0x0001 /* +10dB */
#define AC97_AD1986_MBC_30 0x0002 /* +30dB */
#define AC97_AD1986_LISEL0 0x0004 /* LINE_IN select bit 0 */
#define AC97_AD1986_LISEL1 0x0008 /* LINE_IN select bit 1 */
#define AC97_AD1986_LISEL_MASK (AC97_AD1986_LISEL1 | AC97_AD1986_LISEL0)
#define AC97_AD1986_LISEL_LI 0x0000 /* LINE_IN pins as LINE_IN source */
#define AC97_AD1986_LISEL_SURR 0x0004 /* SURROUND pins as LINE_IN source */
#define AC97_AD1986_LISEL_MIC 0x0008 /* MIC_1/2 pins as LINE_IN source */
#define AC97_AD1986_SRU 0x0010 /* sample rate unlock */
#define AC97_AD1986_SOSEL 0x0020 /* SURROUND_OUT amplifiers input sel */
#define AC97_AD1986_2MIC 0x0040 /* 2-channel mic select */
#define AC97_AD1986_SPRD 0x0080 /* SPREAD enable */
#define AC97_AD1986_DMIX0 0x0100 /* downmix mode: */
/* 0 = 6-to-4, 1 = 6-to-2 downmix */
#define AC97_AD1986_DMIX1 0x0200 /* downmix mode: 1 = enabled */
#define AC97_AD1986_CLDIS 0x0800 /* center/lfe disable */
#define AC97_AD1986_SODIS 0x1000 /* SURROUND_OUT disable */
#define AC97_AD1986_MSPLT 0x2000 /* mute split (read only 1) */
#define AC97_AD1986_AC97NC 0x4000 /* AC'97 non-compatible mode (read only 1) */
#define AC97_AD1986_DACZ 0x8000 /* DAC zero-fill mode */
/* MISC 2 bits (AD1986 register 0x70) */
#define AC97_AD_MISC2 0x70 /* Misc Control Bits 2 (AD1986) */
#define AC97_AD1986_CVREF0 0x0004 /* C/LFE VREF_OUT 2.25V */
#define AC97_AD1986_CVREF1 0x0008 /* C/LFE VREF_OUT 0V */
#define AC97_AD1986_CVREF2 0x0010 /* C/LFE VREF_OUT 3.7V */
#define AC97_AD1986_CVREF_MASK \
(AC97_AD1986_CVREF2 | AC97_AD1986_CVREF1 | AC97_AD1986_CVREF0)
#define AC97_AD1986_JSMAP 0x0020 /* Jack Sense Mapping 1 = alternate */
#define AC97_AD1986_MMDIS 0x0080 /* Mono Mute Disable */
#define AC97_AD1986_MVREF0 0x0400 /* MIC VREF_OUT 2.25V */
#define AC97_AD1986_MVREF1 0x0800 /* MIC VREF_OUT 0V */
#define AC97_AD1986_MVREF2 0x1000 /* MIC VREF_OUT 3.7V */
#define AC97_AD1986_MVREF_MASK \
(AC97_AD1986_MVREF2 | AC97_AD1986_MVREF1 | AC97_AD1986_MVREF0)
/* MISC 3 bits (AD1986 register 0x7a) */
#define AC97_AD_MISC3 0x7a /* Misc Control Bits 3 (AD1986) */
#define AC97_AD1986_MMIX 0x0004 /* Mic Mix, left/right */
#define AC97_AD1986_GPO 0x0008 /* General Purpose Out */
#define AC97_AD1986_LOHPEN 0x0010 /* LINE_OUT headphone drive */
#define AC97_AD1986_LVREF0 0x0100 /* LINE_OUT VREF_OUT 2.25V */
#define AC97_AD1986_LVREF1 0x0200 /* LINE_OUT VREF_OUT 0V */
#define AC97_AD1986_LVREF2 0x0400 /* LINE_OUT VREF_OUT 3.7V */
#define AC97_AD1986_LVREF_MASK \
(AC97_AD1986_LVREF2 | AC97_AD1986_LVREF1 | AC97_AD1986_LVREF0)
#define AC97_AD1986_JSINVA 0x0800 /* Jack Sense Invert SENSE_A */
#define AC97_AD1986_LOSEL 0x1000 /* LINE_OUT amplifiers input select */
#define AC97_AD1986_HPSEL0 0x2000 /* Headphone amplifiers */
/* input select Surround DACs */
#define AC97_AD1986_HPSEL1 0x4000 /* Headphone amplifiers input */
/* select C/LFE DACs */
#define AC97_AD1986_JSINVB 0x8000 /* Jack Sense Invert SENSE_B */
/* Serial Config bits (AD1986 register 0x74) (incomplete) */
#define AC97_AD1986_OMS0 0x0100 /* Optional Mic Selector bit 0 */
#define AC97_AD1986_OMS1 0x0200 /* Optional Mic Selector bit 1 */
#define AC97_AD1986_OMS2 0x0400 /* Optional Mic Selector bit 2 */
#define AC97_AD1986_OMS_MASK \
(AC97_AD1986_OMS2 | AC97_AD1986_OMS1 | AC97_AD1986_OMS0)
#define AC97_AD1986_OMS_M 0x0000 /* MIC_1/2 pins are MIC sources */
#define AC97_AD1986_OMS_L 0x0100 /* LINE_IN pins are MIC sources */
#define AC97_AD1986_OMS_C 0x0200 /* Center/LFE pins are MIC sources */
#define AC97_AD1986_OMS_MC 0x0400 /* Mix of MIC and C/LFE pins */
/* are MIC sources */
#define AC97_AD1986_OMS_ML 0x0500 /* MIX of MIC and LINE_IN pins */
/* are MIC sources */
#define AC97_AD1986_OMS_LC 0x0600 /* MIX of LINE_IN and C/LFE pins */
/* are MIC sources */
#define AC97_AD1986_OMS_MLC 0x0700 /* MIX of MIC, LINE_IN, C/LFE pins */
/* are MIC sources */
static int snd_ac97_ad198x_spdif_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[2] = { "AC-Link", "A/D Converter" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 2;
if (uinfo->value.enumerated.item > 1)
uinfo->value.enumerated.item = 1;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ad198x_spdif_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_AD_SERIAL_CFG];
ucontrol->value.enumerated.item[0] = (val >> 2) & 1;
return 0;
}
static int snd_ac97_ad198x_spdif_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 1)
return -EINVAL;
val = ucontrol->value.enumerated.item[0] << 2;
return snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 0x0004, val);
}
static const struct snd_kcontrol_new snd_ac97_ad198x_spdif_source = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
.info = snd_ac97_ad198x_spdif_source_info,
.get = snd_ac97_ad198x_spdif_source_get,
.put = snd_ac97_ad198x_spdif_source_put,
};
static int patch_ad198x_post_spdif(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, &snd_ac97_ad198x_spdif_source, 1);
}
static const struct snd_kcontrol_new snd_ac97_ad1981x_jack_sense[] = {
AC97_SINGLE("Headphone Jack Sense", AC97_AD_JACK_SPDIF, 11, 1, 0),
AC97_SINGLE("Line Jack Sense", AC97_AD_JACK_SPDIF, 12, 1, 0),
};
/* black list to avoid HP/Line jack-sense controls
* (SS vendor << 16 | device)
*/
static unsigned int ad1981_jacks_blacklist[] = {
0x10140523, /* Thinkpad R40 */
0x10140534, /* Thinkpad X31 */
0x10140537, /* Thinkpad T41p */
0x1014053e, /* Thinkpad R40e */
0x10140554, /* Thinkpad T42p/R50p */
0x10140567, /* Thinkpad T43p 2668-G7U */
0x10140581, /* Thinkpad X41-2527 */
0x10280160, /* Dell Dimension 2400 */
0x104380b0, /* Asus A7V8X-MX */
0x11790241, /* Toshiba Satellite A-15 S127 */
0x1179ff10, /* Toshiba P500 */
0x144dc01a, /* Samsung NP-X20C004/SEG */
0 /* end */
};
static int check_list(struct snd_ac97 *ac97, const unsigned int *list)
{
u32 subid = ((u32)ac97->subsystem_vendor << 16) | ac97->subsystem_device;
for (; *list; list++)
if (*list == subid)
return 1;
return 0;
}
static int patch_ad1981a_specific(struct snd_ac97 * ac97)
{
if (check_list(ac97, ad1981_jacks_blacklist))
return 0;
return patch_build_controls(ac97, snd_ac97_ad1981x_jack_sense,
ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
}
static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1981a_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume
#endif
};
/* white list to enable HP jack-sense bits
* (SS vendor << 16 | device)
*/
static unsigned int ad1981_jacks_whitelist[] = {
0x0e11005a, /* HP nc4000/4010 */
0x103c0890, /* HP nc6000 */
0x103c0938, /* HP nc4220 */
0x103c099c, /* HP nx6110 */
0x103c0944, /* HP nc6220 */
0x103c0934, /* HP nc8220 */
0x103c006d, /* HP nx9105 */
0x103c300d, /* HP Compaq dc5100 SFF(PT003AW) */
0x17340088, /* FSC Scenic-W */
0 /* end */
};
static void check_ad1981_hp_jack_sense(struct snd_ac97 *ac97)
{
if (check_list(ac97, ad1981_jacks_whitelist))
/* enable headphone jack sense */
snd_ac97_update_bits(ac97, AC97_AD_JACK_SPDIF, 1<<11, 1<<11);
}
static int patch_ad1981a(struct snd_ac97 *ac97)
{
patch_ad1881(ac97);
ac97->build_ops = &patch_ad1981a_build_ops;
snd_ac97_update_bits(ac97, AC97_AD_MISC, AC97_AD198X_MSPLT, AC97_AD198X_MSPLT);
ac97->flags |= AC97_STEREO_MUTES;
check_ad1981_hp_jack_sense(ac97);
return 0;
}
static const struct snd_kcontrol_new snd_ac97_ad198x_2cmic =
AC97_SINGLE("Stereo Mic", AC97_AD_MISC, 6, 1, 0);
static int patch_ad1981b_specific(struct snd_ac97 *ac97)
{
int err;
if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
return err;
if (check_list(ac97, ad1981_jacks_blacklist))
return 0;
return patch_build_controls(ac97, snd_ac97_ad1981x_jack_sense,
ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
}
static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1981b_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume
#endif
};
static int patch_ad1981b(struct snd_ac97 *ac97)
{
patch_ad1881(ac97);
ac97->build_ops = &patch_ad1981b_build_ops;
snd_ac97_update_bits(ac97, AC97_AD_MISC, AC97_AD198X_MSPLT, AC97_AD198X_MSPLT);
ac97->flags |= AC97_STEREO_MUTES;
check_ad1981_hp_jack_sense(ac97);
return 0;
}
#define snd_ac97_ad1888_lohpsel_info snd_ctl_boolean_mono_info
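/*
 * "Exchange Front/Surround" toggles the LOSEL/HPSEL amplifier input selects
 * together; the control reads as "on" when both bits are clear, and
 * lo_as_master inverts the reported/written sense on boards where LINE_OUT is
 * the real master output (see patch_ad1888()).
 */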
static int snd_ac97_ad1888_lohpsel_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_AD_MISC];
ucontrol->value.integer.value[0] = !(val & AC97_AD198X_LOSEL);
if (ac97->spec.ad18xx.lo_as_master)
ucontrol->value.integer.value[0] =
!ucontrol->value.integer.value[0];
return 0;
}
static int snd_ac97_ad1888_lohpsel_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = !ucontrol->value.integer.value[0];
if (ac97->spec.ad18xx.lo_as_master)
val = !val;
val = val ? (AC97_AD198X_LOSEL | AC97_AD198X_HPSEL) : 0;
return snd_ac97_update_bits(ac97, AC97_AD_MISC,
AC97_AD198X_LOSEL | AC97_AD198X_HPSEL, val);
}
static int snd_ac97_ad1888_downmix_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[3] = {"Off", "6 -> 4", "6 -> 2"};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 3;
if (uinfo->value.enumerated.item > 2)
uinfo->value.enumerated.item = 2;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ad1888_downmix_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_AD_MISC];
if (!(val & AC97_AD198X_DMIX1))
ucontrol->value.enumerated.item[0] = 0;
else
ucontrol->value.enumerated.item[0] = 1 + ((val >> 8) & 1);
return 0;
}
static int snd_ac97_ad1888_downmix_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 2)
return -EINVAL;
if (ucontrol->value.enumerated.item[0] == 0)
val = 0;
else
val = AC97_AD198X_DMIX1 |
((ucontrol->value.enumerated.item[0] - 1) << 8);
return snd_ac97_update_bits(ac97, AC97_AD_MISC,
AC97_AD198X_DMIX0 | AC97_AD198X_DMIX1, val);
}
static void ad1888_update_jacks(struct snd_ac97 *ac97)
{
unsigned short val = 0;
/* clear LODIS if shared jack is to be used for Surround out */
if (!ac97->spec.ad18xx.lo_as_master && is_shared_linein(ac97))
val |= (1 << 12);
/* clear CLDIS if shared jack is to be used for C/LFE out */
if (is_shared_micin(ac97))
val |= (1 << 11);
/* shared Line-In */
snd_ac97_update_bits(ac97, AC97_AD_MISC, (1 << 11) | (1 << 12), val);
}
static const struct snd_kcontrol_new snd_ac97_ad1888_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Exchange Front/Surround",
.info = snd_ac97_ad1888_lohpsel_info,
.get = snd_ac97_ad1888_lohpsel_get,
.put = snd_ac97_ad1888_lohpsel_put
},
AC97_SINGLE("V_REFOUT Enable", AC97_AD_MISC, AC97_AD_VREFD_SHIFT, 1, 1),
AC97_SINGLE("High Pass Filter Enable", AC97_AD_TEST2,
AC97_AD_HPFD_SHIFT, 1, 1),
AC97_SINGLE("Spread Front to Surround and Center/LFE", AC97_AD_MISC, 7, 1, 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Downmix",
.info = snd_ac97_ad1888_downmix_info,
.get = snd_ac97_ad1888_downmix_get,
.put = snd_ac97_ad1888_downmix_put
},
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
AC97_SINGLE("Headphone Jack Sense", AC97_AD_JACK_SPDIF, 10, 1, 0),
AC97_SINGLE("Line Jack Sense", AC97_AD_JACK_SPDIF, 12, 1, 0),
};
static int patch_ad1888_specific(struct snd_ac97 *ac97)
{
if (!ac97->spec.ad18xx.lo_as_master) {
/* rename 0x04 as "Master" and 0x02 as "Master Surround" */
snd_ac97_rename_vol_ctl(ac97, "Master Playback",
"Master Surround Playback");
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback",
"Master Playback");
}
return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
}
static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1888_specific,
#ifdef CONFIG_PM
.resume = ad1888_resume,
#endif
.update_jacks = ad1888_update_jacks,
};
static int patch_ad1888(struct snd_ac97 * ac97)
{
unsigned short misc;
patch_ad1881(ac97);
ac97->build_ops = &patch_ad1888_build_ops;
/*
* LO can be used as a real line-out on some devices,
* and we need to revert the front/surround mixer switches
*/
if (ac97->subsystem_vendor == 0x1043 &&
ac97->subsystem_device == 0x1193) /* ASUS A9T laptop */
ac97->spec.ad18xx.lo_as_master = 1;
misc = snd_ac97_read(ac97, AC97_AD_MISC);
/* AD-compatible mode */
/* Stereo mutes enabled */
misc |= AC97_AD198X_MSPLT | AC97_AD198X_AC97NC;
if (!ac97->spec.ad18xx.lo_as_master)
/* Switch FRONT/SURROUND LINE-OUT/HP-OUT default connection */
/* it seems that most vendors connect line-out connector to
* headphone out of AC'97
*/
misc |= AC97_AD198X_LOSEL | AC97_AD198X_HPSEL;
snd_ac97_write_cache(ac97, AC97_AD_MISC, misc);
ac97->flags |= AC97_STEREO_MUTES;
return 0;
}
static int patch_ad1980_specific(struct snd_ac97 *ac97)
{
int err;
if ((err = patch_ad1888_specific(ac97)) < 0)
return err;
return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
}
static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1980_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume,
#endif
.update_jacks = ad1888_update_jacks,
};
static int patch_ad1980(struct snd_ac97 * ac97)
{
patch_ad1888(ac97);
ac97->build_ops = &patch_ad1980_build_ops;
return 0;
}
static int snd_ac97_ad1985_vrefout_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static char *texts[4] = {"High-Z", "3.7 V", "2.25 V", "0 V"};
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 4;
if (uinfo->value.enumerated.item > 3)
uinfo->value.enumerated.item = 3;
strcpy(uinfo->value.enumerated.name,
texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_ad1985_vrefout_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
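/*
 * The VREF field encoding does not match the control's item order
 * ("High-Z", "3.7 V", "2.25 V", "0 V"), so reg2ctrl/ctrl2reg translate
 * between the register value and the enumerated item.
 */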
static const int reg2ctrl[4] = {2, 0, 1, 3};
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = (ac97->regs[AC97_AD_MISC] & AC97_AD198X_VREF_MASK)
>> AC97_AD198X_VREF_SHIFT;
ucontrol->value.enumerated.item[0] = reg2ctrl[val];
return 0;
}
static int snd_ac97_ad1985_vrefout_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
static const int ctrl2reg[4] = {1, 2, 0, 3};
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
if (ucontrol->value.enumerated.item[0] > 3)
return -EINVAL;
val = ctrl2reg[ucontrol->value.enumerated.item[0]]
<< AC97_AD198X_VREF_SHIFT;
return snd_ac97_update_bits(ac97, AC97_AD_MISC,
AC97_AD198X_VREF_MASK, val);
}
static const struct snd_kcontrol_new snd_ac97_ad1985_controls[] = {
AC97_SINGLE("Exchange Center/LFE", AC97_AD_SERIAL_CFG, 3, 1, 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Exchange Front/Surround",
.info = snd_ac97_ad1888_lohpsel_info,
.get = snd_ac97_ad1888_lohpsel_get,
.put = snd_ac97_ad1888_lohpsel_put
},
AC97_SINGLE("High Pass Filter Enable", AC97_AD_TEST2, 12, 1, 1),
AC97_SINGLE("Spread Front to Surround and Center/LFE",
AC97_AD_MISC, 7, 1, 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Downmix",
.info = snd_ac97_ad1888_downmix_info,
.get = snd_ac97_ad1888_downmix_get,
.put = snd_ac97_ad1888_downmix_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "V_REFOUT",
.info = snd_ac97_ad1985_vrefout_info,
.get = snd_ac97_ad1985_vrefout_get,
.put = snd_ac97_ad1985_vrefout_put
},
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
AC97_SINGLE("Headphone Jack Sense", AC97_AD_JACK_SPDIF, 10, 1, 0),
AC97_SINGLE("Line Jack Sense", AC97_AD_JACK_SPDIF, 12, 1, 0),
};
static void ad1985_update_jacks(struct snd_ac97 *ac97)
{
ad1888_update_jacks(ac97);
/* clear OMS if shared jack is to be used for C/LFE out */
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG, 1 << 9,
is_shared_micin(ac97) ? 1 << 9 : 0);
}
static int patch_ad1985_specific(struct snd_ac97 *ac97)
{
int err;
/* rename 0x04 as "Master" and 0x02 as "Master Surround" */
snd_ac97_rename_vol_ctl(ac97, "Master Playback",
"Master Surround Playback");
snd_ac97_rename_vol_ctl(ac97, "Headphone Playback", "Master Playback");
if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
return err;
return patch_build_controls(ac97, snd_ac97_ad1985_controls,
ARRAY_SIZE(snd_ac97_ad1985_controls));
}
static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1985_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume,
#endif
.update_jacks = ad1985_update_jacks,
};
static int patch_ad1985(struct snd_ac97 * ac97)
{
unsigned short misc;
patch_ad1881(ac97);
ac97->build_ops = &patch_ad1985_build_ops;
misc = snd_ac97_read(ac97, AC97_AD_MISC);
/* switch front/surround line-out/hp-out */
/* AD-compatible mode */
/* Stereo mutes enabled */
snd_ac97_write_cache(ac97, AC97_AD_MISC, misc |
AC97_AD198X_LOSEL |
AC97_AD198X_HPSEL |
AC97_AD198X_MSPLT |
AC97_AD198X_AC97NC);
ac97->flags |= AC97_STEREO_MUTES;
/* update current jack configuration */
ad1985_update_jacks(ac97);
/* on AD1985 rev. 3, AC'97 revision bits are zero */
ac97->ext_id = (ac97->ext_id & ~AC97_EI_REV_MASK) | AC97_EI_REV_23;
return 0;
}
#define snd_ac97_ad1986_bool_info snd_ctl_boolean_mono_info
static int snd_ac97_ad1986_lososel_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_AD_MISC3];
ucontrol->value.integer.value[0] = (val & AC97_AD1986_LOSEL) != 0;
return 0;
}
static int snd_ac97_ad1986_lososel_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int ret0;
int ret1;
int sprd = (ac97->regs[AC97_AD_MISC] & AC97_AD1986_SPRD) != 0;
ret0 = snd_ac97_update_bits(ac97, AC97_AD_MISC3, AC97_AD1986_LOSEL,
ucontrol->value.integer.value[0] != 0
? AC97_AD1986_LOSEL : 0);
if (ret0 < 0)
return ret0;
/* SOSEL is set to values of "Spread" or "Exchange F/S" controls */
ret1 = snd_ac97_update_bits(ac97, AC97_AD_MISC, AC97_AD1986_SOSEL,
(ucontrol->value.integer.value[0] != 0
|| sprd)
? AC97_AD1986_SOSEL : 0);
if (ret1 < 0)
return ret1;
return (ret0 > 0 || ret1 > 0) ? 1 : 0;
}
static int snd_ac97_ad1986_spread_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_AD_MISC];
ucontrol->value.integer.value[0] = (val & AC97_AD1986_SPRD) != 0;
return 0;
}
static int snd_ac97_ad1986_spread_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int ret0;
int ret1;
int sprd = (ac97->regs[AC97_AD_MISC3] & AC97_AD1986_LOSEL) != 0;
ret0 = snd_ac97_update_bits(ac97, AC97_AD_MISC, AC97_AD1986_SPRD,
ucontrol->value.integer.value[0] != 0
? AC97_AD1986_SPRD : 0);
if (ret0 < 0)
return ret0;
/* SOSEL is set to values of "Spread" or "Exchange F/S" controls */
ret1 = snd_ac97_update_bits(ac97, AC97_AD_MISC, AC97_AD1986_SOSEL,
(ucontrol->value.integer.value[0] != 0
|| sprd)
? AC97_AD1986_SOSEL : 0);
if (ret1 < 0)
return ret1;
return (ret0 > 0 || ret1 > 0) ? 1 : 0;
}
static int snd_ac97_ad1986_miclisel_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
ucontrol->value.integer.value[0] = ac97->spec.ad18xx.swap_mic_linein;
return 0;
}
static int snd_ac97_ad1986_miclisel_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned char swap = ucontrol->value.integer.value[0] != 0;
if (swap != ac97->spec.ad18xx.swap_mic_linein) {
ac97->spec.ad18xx.swap_mic_linein = swap;
if (ac97->build_ops->update_jacks)
ac97->build_ops->update_jacks(ac97);
return 1;
}
return 0;
}
static int snd_ac97_ad1986_vrefout_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
/* Use MIC_1/2 V_REFOUT as the "get" value */
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
unsigned short reg = ac97->regs[AC97_AD_MISC2];
if ((reg & AC97_AD1986_MVREF0) != 0)
val = 2;
else if ((reg & AC97_AD1986_MVREF1) != 0)
val = 3;
else if ((reg & AC97_AD1986_MVREF2) != 0)
val = 1;
else
val = 0;
ucontrol->value.enumerated.item[0] = val;
return 0;
}
static int snd_ac97_ad1986_vrefout_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short cval;
unsigned short lval;
unsigned short mval;
int cret;
int lret;
int mret;
switch (ucontrol->value.enumerated.item[0])
{
case 0: /* High-Z */
cval = 0;
lval = 0;
mval = 0;
break;
case 1: /* 3.7 V */
cval = AC97_AD1986_CVREF2;
lval = AC97_AD1986_LVREF2;
mval = AC97_AD1986_MVREF2;
break;
case 2: /* 2.25 V */
cval = AC97_AD1986_CVREF0;
lval = AC97_AD1986_LVREF0;
mval = AC97_AD1986_MVREF0;
break;
case 3: /* 0 V */
cval = AC97_AD1986_CVREF1;
lval = AC97_AD1986_LVREF1;
mval = AC97_AD1986_MVREF1;
break;
default:
return -EINVAL;
}
cret = snd_ac97_update_bits(ac97, AC97_AD_MISC2,
AC97_AD1986_CVREF_MASK, cval);
if (cret < 0)
return cret;
lret = snd_ac97_update_bits(ac97, AC97_AD_MISC3,
AC97_AD1986_LVREF_MASK, lval);
if (lret < 0)
return lret;
mret = snd_ac97_update_bits(ac97, AC97_AD_MISC2,
AC97_AD1986_MVREF_MASK, mval);
if (mret < 0)
return mret;
return (cret > 0 || lret > 0 || mret > 0) ? 1 : 0;
}
static const struct snd_kcontrol_new snd_ac97_ad1986_controls[] = {
AC97_SINGLE("Exchange Center/LFE", AC97_AD_SERIAL_CFG, 3, 1, 0),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Exchange Front/Surround",
.info = snd_ac97_ad1986_bool_info,
.get = snd_ac97_ad1986_lososel_get,
.put = snd_ac97_ad1986_lososel_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Exchange Mic/Line In",
.info = snd_ac97_ad1986_bool_info,
.get = snd_ac97_ad1986_miclisel_get,
.put = snd_ac97_ad1986_miclisel_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Spread Front to Surround and Center/LFE",
.info = snd_ac97_ad1986_bool_info,
.get = snd_ac97_ad1986_spread_get,
.put = snd_ac97_ad1986_spread_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Downmix",
.info = snd_ac97_ad1888_downmix_info,
.get = snd_ac97_ad1888_downmix_get,
.put = snd_ac97_ad1888_downmix_put
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "V_REFOUT",
.info = snd_ac97_ad1985_vrefout_info,
.get = snd_ac97_ad1986_vrefout_get,
.put = snd_ac97_ad1986_vrefout_put
},
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
AC97_SINGLE("Headphone Jack Sense", AC97_AD_JACK_SPDIF, 10, 1, 0),
AC97_SINGLE("Line Jack Sense", AC97_AD_JACK_SPDIF, 12, 1, 0)
};
static void ad1986_update_jacks(struct snd_ac97 *ac97)
{
unsigned short misc_val = 0;
unsigned short ser_val;
/* disable SURROUND and CENTER/LFE if not surround mode */
if (!is_surround_on(ac97))
misc_val |= AC97_AD1986_SODIS;
if (!is_clfe_on(ac97))
misc_val |= AC97_AD1986_CLDIS;
/* select line input (default=LINE_IN, SURROUND or MIC_1/2) */
if (is_shared_linein(ac97))
misc_val |= AC97_AD1986_LISEL_SURR;
else if (ac97->spec.ad18xx.swap_mic_linein != 0)
misc_val |= AC97_AD1986_LISEL_MIC;
snd_ac97_update_bits(ac97, AC97_AD_MISC,
AC97_AD1986_SODIS | AC97_AD1986_CLDIS |
AC97_AD1986_LISEL_MASK,
misc_val);
/* select microphone input (MIC_1/2, Center/LFE or LINE_IN) */
if (is_shared_micin(ac97))
ser_val = AC97_AD1986_OMS_C;
else if (ac97->spec.ad18xx.swap_mic_linein != 0)
ser_val = AC97_AD1986_OMS_L;
else
ser_val = AC97_AD1986_OMS_M;
snd_ac97_update_bits(ac97, AC97_AD_SERIAL_CFG,
AC97_AD1986_OMS_MASK,
ser_val);
}
static int patch_ad1986_specific(struct snd_ac97 *ac97)
{
int err;
if ((err = patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1)) < 0)
return err;
return patch_build_controls(ac97, snd_ac97_ad1986_controls,
ARRAY_SIZE(snd_ac97_ad1986_controls));
}
static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
.build_post_spdif = patch_ad198x_post_spdif,
.build_specific = patch_ad1986_specific,
#ifdef CONFIG_PM
.resume = ad18xx_resume,
#endif
.update_jacks = ad1986_update_jacks,
};
static int patch_ad1986(struct snd_ac97 * ac97)
{
patch_ad1881(ac97);
ac97->build_ops = &patch_ad1986_build_ops;
ac97->flags |= AC97_STEREO_MUTES;
/* update current jack configuration */
ad1986_update_jacks(ac97);
return 0;
}
/*
* realtek ALC203: use mono-out for pin 37
*/
static int patch_alc203(struct snd_ac97 *ac97)
{
snd_ac97_update_bits(ac97, 0x7a, 0x400, 0x400);
return 0;
}
/*
* realtek ALC65x/850 codecs
*/
static void alc650_update_jacks(struct snd_ac97 *ac97)
{
int shared;
/* shared Line-In / Surround Out */
shared = is_shared_surrout(ac97);
snd_ac97_update_bits(ac97, AC97_ALC650_MULTICH, 1 << 9,
shared ? (1 << 9) : 0);
/* update shared Mic In / Center/LFE Out */
shared = is_shared_clfeout(ac97);
/* disable/enable vref */
snd_ac97_update_bits(ac97, AC97_ALC650_CLOCK, 1 << 12,
shared ? (1 << 12) : 0);
/* turn on/off center-on-mic */
snd_ac97_update_bits(ac97, AC97_ALC650_MULTICH, 1 << 10,
shared ? (1 << 10) : 0);
/* GPIO0 high for mic */
snd_ac97_update_bits(ac97, AC97_ALC650_GPIO_STATUS, 0x100,
shared ? 0 : 0x100);
}
static const struct snd_kcontrol_new snd_ac97_controls_alc650[] = {
AC97_SINGLE("Duplicate Front", AC97_ALC650_MULTICH, 0, 1, 0),
AC97_SINGLE("Surround Down Mix", AC97_ALC650_MULTICH, 1, 1, 0),
AC97_SINGLE("Center/LFE Down Mix", AC97_ALC650_MULTICH, 2, 1, 0),
AC97_SINGLE("Exchange Center/LFE", AC97_ALC650_MULTICH, 3, 1, 0),
/* 4: Analog Input To Surround */
/* 5: Analog Input To Center/LFE */
/* 6: Independent Master Volume Right */
/* 7: Independent Master Volume Left */
/* 8: reserved */
/* 9: Line-In/Surround share */
/* 10: Mic/CLFE share */
/* 11-13: in IEC958 controls */
AC97_SINGLE("Swap Surround Slot", AC97_ALC650_MULTICH, 14, 1, 0),
#if 0 /* always set in patch_alc650 */
AC97_SINGLE("IEC958 Input Clock Enable", AC97_ALC650_CLOCK, 0, 1, 0),
AC97_SINGLE("IEC958 Input Pin Enable", AC97_ALC650_CLOCK, 1, 1, 0),
AC97_SINGLE("Surround DAC Switch", AC97_ALC650_SURR_DAC_VOL, 15, 1, 1),
AC97_DOUBLE("Surround DAC Volume", AC97_ALC650_SURR_DAC_VOL, 8, 0, 31, 1),
AC97_SINGLE("Center/LFE DAC Switch", AC97_ALC650_LFE_DAC_VOL, 15, 1, 1),
AC97_DOUBLE("Center/LFE DAC Volume", AC97_ALC650_LFE_DAC_VOL, 8, 0, 31, 1),
#endif
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
};
static const struct snd_kcontrol_new snd_ac97_spdif_controls_alc650[] = {
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0),
AC97_SINGLE("Analog to IEC958 Output", AC97_ALC650_MULTICH, 12, 1, 0),
/* this control is disabled since it doesn't work as expected */
/* AC97_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 13, 1, 0), */
};
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_3db_max, -4350, 150, 0);
static int patch_alc650_specific(struct snd_ac97 * ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_controls_alc650, ARRAY_SIZE(snd_ac97_controls_alc650))) < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc650, ARRAY_SIZE(snd_ac97_spdif_controls_alc650))) < 0)
return err;
}
if (ac97->id != AC97_ID_ALC650F)
reset_tlv(ac97, "Master Playback Volume",
db_scale_5bit_3db_max);
return 0;
}
static const struct snd_ac97_build_ops patch_alc650_ops = {
.build_specific = patch_alc650_specific,
.update_jacks = alc650_update_jacks
};
static int patch_alc650(struct snd_ac97 * ac97)
{
unsigned short val;
ac97->build_ops = &patch_alc650_ops;
/* determine the revision */
val = snd_ac97_read(ac97, AC97_ALC650_REVISION) & 0x3f;
if (val < 3)
ac97->id = 0x414c4720; /* Old version */
else if (val < 0x10)
ac97->id = 0x414c4721; /* D version */
else if (val < 0x20)
ac97->id = 0x414c4722; /* E version */
else if (val < 0x30)
ac97->id = 0x414c4723; /* F version */
/* revision E or F */
/* FIXME: what about revision D ? */
ac97->spec.dev_flags = (ac97->id == 0x414c4722 ||
ac97->id == 0x414c4723);
/* enable AC97_ALC650_GPIO_SETUP, AC97_ALC650_CLOCK for R/W */
snd_ac97_write_cache(ac97, AC97_ALC650_GPIO_STATUS,
snd_ac97_read(ac97, AC97_ALC650_GPIO_STATUS) | 0x8000);
/* Enable SPDIF-IN only on Rev.E and above */
val = snd_ac97_read(ac97, AC97_ALC650_CLOCK);
/* SPDIF IN with pin 47 */
if (ac97->spec.dev_flags &&
/* ASUS A6KM requires EAPD */
! (ac97->subsystem_vendor == 0x1043 &&
ac97->subsystem_device == 0x1103))
val |= 0x03; /* enable */
else
val &= ~0x03; /* disable */
snd_ac97_write_cache(ac97, AC97_ALC650_CLOCK, val);
/* set default: slot 3,4,7,8,6,9
spdif-in monitor off, analog-spdif off, spdif-in off
center on mic off, surround on line-in off
downmix off, duplicate front off
*/
snd_ac97_write_cache(ac97, AC97_ALC650_MULTICH, 0);
/* set GPIO0 for mic bias */
/* GPIO0 pin output, no interrupt, high */
snd_ac97_write_cache(ac97, AC97_ALC650_GPIO_SETUP,
snd_ac97_read(ac97, AC97_ALC650_GPIO_SETUP) | 0x01);
snd_ac97_write_cache(ac97, AC97_ALC650_GPIO_STATUS,
(snd_ac97_read(ac97, AC97_ALC650_GPIO_STATUS) | 0x100) & ~0x10);
/* full DAC volume */
snd_ac97_write_cache(ac97, AC97_ALC650_SURR_DAC_VOL, 0x0808);
snd_ac97_write_cache(ac97, AC97_ALC650_LFE_DAC_VOL, 0x0808);
return 0;
}
static void alc655_update_jacks(struct snd_ac97 *ac97)
{
int shared;
/* shared Line-In / Surround Out */
shared = is_shared_surrout(ac97);
ac97_update_bits_page(ac97, AC97_ALC650_MULTICH, 1 << 9,
shared ? (1 << 9) : 0, 0);
/* update shared Mic In / Center/LFE Out */
shared = is_shared_clfeout(ac97);
/* misc control; vrefout disable */
snd_ac97_update_bits(ac97, AC97_ALC650_CLOCK, 1 << 12,
shared ? (1 << 12) : 0);
ac97_update_bits_page(ac97, AC97_ALC650_MULTICH, 1 << 10,
shared ? (1 << 10) : 0, 0);
}
static const struct snd_kcontrol_new snd_ac97_controls_alc655[] = {
AC97_PAGE_SINGLE("Duplicate Front", AC97_ALC650_MULTICH, 0, 1, 0, 0),
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
};
static int alc655_iec958_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts_655[3] = { "PCM", "Analog In", "IEC958 In" };
static char *texts_658[4] = { "PCM", "Analog1 In", "Analog2 In", "IEC958 In" };
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = ac97->spec.dev_flags ? 4 : 3;
if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
strcpy(uinfo->value.enumerated.name,
ac97->spec.dev_flags ?
texts_658[uinfo->value.enumerated.item] :
texts_655[uinfo->value.enumerated.item]);
return 0;
}
static int alc655_iec958_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
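/* bits 13:12 of MULTICH select the IEC958 playback source; on the ALC658
 * (dev_flags set) a raw value of 3 is folded back to "PCM" */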
val = ac97->regs[AC97_ALC650_MULTICH];
val = (val >> 12) & 3;
if (ac97->spec.dev_flags && val == 3)
val = 0;
ucontrol->value.enumerated.item[0] = val;
return 0;
}
static int alc655_iec958_route_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
return ac97_update_bits_page(ac97, AC97_ALC650_MULTICH, 3 << 12,
(unsigned short)ucontrol->value.enumerated.item[0] << 12,
0);
}
static const struct snd_kcontrol_new snd_ac97_spdif_controls_alc655[] = {
AC97_PAGE_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0, 0),
/* this control is disabled since it doesn't work as expected */
/* AC97_PAGE_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 14, 1, 0, 0), */
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
.info = alc655_iec958_route_info,
.get = alc655_iec958_route_get,
.put = alc655_iec958_route_put,
},
};
static int patch_alc655_specific(struct snd_ac97 * ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_controls_alc655, ARRAY_SIZE(snd_ac97_controls_alc655))) < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655))) < 0)
return err;
}
return 0;
}
static const struct snd_ac97_build_ops patch_alc655_ops = {
.build_specific = patch_alc655_specific,
.update_jacks = alc655_update_jacks
};
static int patch_alc655(struct snd_ac97 * ac97)
{
unsigned int val;
if (ac97->id == AC97_ID_ALC658) {
ac97->spec.dev_flags = 1; /* ALC658 */
if ((snd_ac97_read(ac97, AC97_ALC650_REVISION) & 0x3f) == 2) {
ac97->id = AC97_ID_ALC658D;
ac97->spec.dev_flags = 2;
}
}
ac97->build_ops = &patch_alc655_ops;
/* assume only page 0 for writing cache */
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, AC97_PAGE_VENDOR);
/* adjust default values */
val = snd_ac97_read(ac97, 0x7a); /* misc control */
if (ac97->spec.dev_flags) /* ALC658 */
val &= ~(1 << 1); /* Pin 47 is spdif input pin */
else { /* ALC655 */
if (ac97->subsystem_vendor == 0x1462 &&
(ac97->subsystem_device == 0x0131 || /* MSI S270 laptop */
ac97->subsystem_device == 0x0161 || /* LG K1 Express */
ac97->subsystem_device == 0x0351 || /* MSI L725 laptop */
ac97->subsystem_device == 0x0471 || /* MSI L720 laptop */
ac97->subsystem_device == 0x0061)) /* MSI S250 laptop */
val &= ~(1 << 1); /* Pin 47 is EAPD (for internal speaker) */
else
val |= (1 << 1); /* Pin 47 is spdif input pin */
/* this seems to be missing on some hardware */
ac97->ext_id |= AC97_EI_SPDIF;
}
val &= ~(1 << 12); /* vref enable */
snd_ac97_write_cache(ac97, 0x7a, val);
/* set default: spdif-in enabled,
spdif-in monitor off, spdif-in PCM off
center on mic off, surround on line-in off
duplicate front off
*/
snd_ac97_write_cache(ac97, AC97_ALC650_MULTICH, 1<<15);
/* full DAC volume */
snd_ac97_write_cache(ac97, AC97_ALC650_SURR_DAC_VOL, 0x0808);
snd_ac97_write_cache(ac97, AC97_ALC650_LFE_DAC_VOL, 0x0808);
/* update undocumented bit... */
if (ac97->id == AC97_ID_ALC658D)
snd_ac97_update_bits(ac97, 0x74, 0x0800, 0x0800);
return 0;
}
#define AC97_ALC850_JACK_SELECT 0x76
#define AC97_ALC850_MISC1 0x7a
#define AC97_ALC850_MULTICH 0x6a
static void alc850_update_jacks(struct snd_ac97 *ac97)
{
int shared;
int aux_is_back_surround;
/* shared Line-In / Surround Out */
shared = is_shared_surrout(ac97);
/* SURR 1kOhm (bit4), Amp (bit5) */
snd_ac97_update_bits(ac97, AC97_ALC850_MISC1, (1<<4)|(1<<5),
shared ? (1<<5) : (1<<4));
/* LINE-IN = 0, SURROUND = 2 */
snd_ac97_update_bits(ac97, AC97_ALC850_JACK_SELECT, 7 << 12,
shared ? (2<<12) : (0<<12));
/* update shared Mic In / Center/LFE Out */
shared = is_shared_clfeout(ac97);
/* Vref disable (bit12), 1kOhm (bit13) */
snd_ac97_update_bits(ac97, AC97_ALC850_MISC1, (1<<12)|(1<<13),
shared ? (1<<12) : (1<<13));
/* MIC-IN = 1, CENTER-LFE = 5 */
snd_ac97_update_bits(ac97, AC97_ALC850_JACK_SELECT, 7 << 4,
shared ? (5<<4) : (1<<4));
aux_is_back_surround = alc850_is_aux_back_surround(ac97);
/* Aux is Back Surround */
snd_ac97_update_bits(ac97, AC97_ALC850_MULTICH, 1 << 10,
aux_is_back_surround ? (1<<10) : (0<<10));
}
static const struct snd_kcontrol_new snd_ac97_controls_alc850[] = {
AC97_PAGE_SINGLE("Duplicate Front", AC97_ALC650_MULTICH, 0, 1, 0, 0),
AC97_SINGLE("Mic Front Input Switch", AC97_ALC850_JACK_SELECT, 15, 1, 1),
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_8CH_CTL,
};
static int patch_alc850_specific(struct snd_ac97 *ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_controls_alc850, ARRAY_SIZE(snd_ac97_controls_alc850))) < 0)
return err;
if (ac97->ext_id & AC97_EI_SPDIF) {
if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_alc655, ARRAY_SIZE(snd_ac97_spdif_controls_alc655))) < 0)
return err;
}
return 0;
}
static const struct snd_ac97_build_ops patch_alc850_ops = {
.build_specific = patch_alc850_specific,
.update_jacks = alc850_update_jacks
};
static int patch_alc850(struct snd_ac97 *ac97)
{
ac97->build_ops = &patch_alc850_ops;
ac97->spec.dev_flags = 0; /* for IEC958 playback route - ALC655 compatible */
ac97->flags |= AC97_HAS_8CH;
/* assume only page 0 for writing cache */
snd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, AC97_PAGE_VENDOR);
/* adjust default values */
/* set default: spdif-in enabled,
spdif-in monitor off, spdif-in PCM off
center on mic off, surround on line-in off
duplicate front off
NB default bit 10=0 = Aux is Capture, not Back Surround
*/
snd_ac97_write_cache(ac97, AC97_ALC650_MULTICH, 1<<15);
/* SURR_OUT: on, Surr 1kOhm: on, Surr Amp: off, Front 1kOhm: off
* Front Amp: on, Vref: enable, Center 1kOhm: on, Mix: on
*/
snd_ac97_write_cache(ac97, 0x7a, (1<<1)|(1<<4)|(0<<5)|(1<<6)|
(1<<7)|(0<<12)|(1<<13)|(0<<14));
/* detection UIO2,3: all path floating, UIO3: MIC, Vref2: disable,
* UIO1: FRONT, Vref3: disable, UIO3: LINE, Front-Mic: mute
*/
snd_ac97_write_cache(ac97, 0x76, (0<<0)|(0<<2)|(1<<4)|(1<<7)|(2<<8)|
(1<<11)|(0<<12)|(1<<15));
/* full DAC volume */
snd_ac97_write_cache(ac97, AC97_ALC650_SURR_DAC_VOL, 0x0808);
snd_ac97_write_cache(ac97, AC97_ALC650_LFE_DAC_VOL, 0x0808);
return 0;
}
static int patch_aztech_azf3328_specific(struct snd_ac97 *ac97)
{
struct snd_kcontrol *kctl_3d_center =
snd_ac97_find_mixer_ctl(ac97, "3D Control - Center");
struct snd_kcontrol *kctl_3d_depth =
snd_ac97_find_mixer_ctl(ac97, "3D Control - Depth");
/*
* 3D register is different from AC97 standard layout
* (also do some renaming, to resemble Windows driver naming)
*/
if (kctl_3d_center) {
kctl_3d_center->private_value =
AC97_SINGLE_VALUE(AC97_3D_CONTROL, 1, 0x07, 0);
snd_ac97_rename_vol_ctl(ac97,
"3D Control - Center", "3D Control - Width"
);
}
if (kctl_3d_depth)
kctl_3d_depth->private_value =
AC97_SINGLE_VALUE(AC97_3D_CONTROL, 8, 0x03, 0);
/* Aztech Windows driver calls the
equivalent control "Modem Playback", thus rename it: */
snd_ac97_rename_vol_ctl(ac97,
"Master Mono Playback", "Modem Playback"
);
snd_ac97_rename_vol_ctl(ac97,
"Headphone Playback", "FM Synth Playback"
);
return 0;
}
static const struct snd_ac97_build_ops patch_aztech_azf3328_ops = {
.build_specific = patch_aztech_azf3328_specific
};
static int patch_aztech_azf3328(struct snd_ac97 *ac97)
{
ac97->build_ops = &patch_aztech_azf3328_ops;
return 0;
}
/*
* C-Media CM97xx codecs
*/
static void cm9738_update_jacks(struct snd_ac97 *ac97)
{
/* shared Line-In / Surround Out */
snd_ac97_update_bits(ac97, AC97_CM9738_VENDOR_CTRL, 1 << 10,
is_shared_surrout(ac97) ? (1 << 10) : 0);
}
static const struct snd_kcontrol_new snd_ac97_cm9738_controls[] = {
AC97_SINGLE("Duplicate Front", AC97_CM9738_VENDOR_CTRL, 13, 1, 0),
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_4CH_CTL,
};
static int patch_cm9738_specific(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
}
static const struct snd_ac97_build_ops patch_cm9738_ops = {
.build_specific = patch_cm9738_specific,
.update_jacks = cm9738_update_jacks
};
static int patch_cm9738(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_cm9738_ops;
/* FIXME: can anyone confirm below? */
/* CM9738 has no PCM volume although the register reacts */
ac97->flags |= AC97_HAS_NO_PCM_VOL;
snd_ac97_write_cache(ac97, AC97_PCM, 0x8000);
return 0;
}
static int snd_ac97_cmedia_spdif_playback_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[] = { "Analog", "Digital" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 2;
if (uinfo->value.enumerated.item > 1)
uinfo->value.enumerated.item = 1;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int snd_ac97_cmedia_spdif_playback_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
unsigned short val;
val = ac97->regs[AC97_CM9739_SPDIF_CTRL];
ucontrol->value.enumerated.item[0] = (val >> 1) & 0x01;
return 0;
}
static int snd_ac97_cmedia_spdif_playback_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
return snd_ac97_update_bits(ac97, AC97_CM9739_SPDIF_CTRL,
0x01 << 1,
(ucontrol->value.enumerated.item[0] & 0x01) << 1);
}
static const struct snd_kcontrol_new snd_ac97_cm9739_controls_spdif[] = {
/* BIT 0: SPDI_EN - always true */
{ /* BIT 1: SPDIFS */
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
.info = snd_ac97_cmedia_spdif_playback_source_info,
.get = snd_ac97_cmedia_spdif_playback_source_get,
.put = snd_ac97_cmedia_spdif_playback_source_put,
},
/* BIT 2: IG_SPIV */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,NONE) "Valid Switch", AC97_CM9739_SPDIF_CTRL, 2, 1, 0),
/* BIT 3: SPI2F */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,NONE) "Monitor", AC97_CM9739_SPDIF_CTRL, 3, 1, 0),
/* BIT 4: SPI2SDI */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_CM9739_SPDIF_CTRL, 4, 1, 0),
/* BIT 8: SPD32 - 32bit SPDIF - not supported yet */
};
static void cm9739_update_jacks(struct snd_ac97 *ac97)
{
/* shared Line-In / Surround Out */
snd_ac97_update_bits(ac97, AC97_CM9739_MULTI_CHAN, 1 << 10,
is_shared_surrout(ac97) ? (1 << 10) : 0);
/* shared Mic In / Center/LFE Out */
snd_ac97_update_bits(ac97, AC97_CM9739_MULTI_CHAN, 0x3000,
is_shared_clfeout(ac97) ? 0x1000 : 0x2000);
}
static const struct snd_kcontrol_new snd_ac97_cm9739_controls[] = {
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
};
static int patch_cm9739_specific(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, snd_ac97_cm9739_controls, ARRAY_SIZE(snd_ac97_cm9739_controls));
}
static int patch_cm9739_post_spdif(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
}
static const struct snd_ac97_build_ops patch_cm9739_ops = {
.build_specific = patch_cm9739_specific,
.build_post_spdif = patch_cm9739_post_spdif,
.update_jacks = cm9739_update_jacks
};
static int patch_cm9739(struct snd_ac97 * ac97)
{
unsigned short val;
ac97->build_ops = &patch_cm9739_ops;
/* CM9739/A has no Master and PCM volume although the register reacts */
ac97->flags |= AC97_HAS_NO_MASTER_VOL | AC97_HAS_NO_PCM_VOL;
snd_ac97_write_cache(ac97, AC97_MASTER, 0x8000);
snd_ac97_write_cache(ac97, AC97_PCM, 0x8000);
/* check spdif */
val = snd_ac97_read(ac97, AC97_EXTENDED_STATUS);
if (val & AC97_EA_SPCV) {
/* enable spdif in */
snd_ac97_write_cache(ac97, AC97_CM9739_SPDIF_CTRL,
snd_ac97_read(ac97, AC97_CM9739_SPDIF_CTRL) | 0x01);
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000; /* 48k only */
} else {
ac97->ext_id &= ~AC97_EI_SPDIF; /* disable extended-id */
ac97->rates[AC97_RATES_SPDIF] = 0;
}
/* set-up multi channel */
/* bit 14: 0 = SPDIF, 1 = EAPD */
/* bit 13: enable internal vref output for mic */
/* bit 12: disable center/lfe (switchable) */
/* bit 10: disable surround/line (switchable) */
/* bit 9: mix 2 surround off */
/* bit 4: undocumented; 0 mutes the CM9739A, which defaults to 1 */
/* bit 3: undocumented; surround? */
/* bit 0: dB */
val = snd_ac97_read(ac97, AC97_CM9739_MULTI_CHAN) & (1 << 4);
val |= (1 << 3);
val |= (1 << 13);
if (! (ac97->ext_id & AC97_EI_SPDIF))
val |= (1 << 14);
snd_ac97_write_cache(ac97, AC97_CM9739_MULTI_CHAN, val);
/* FIXME: set up GPIO */
snd_ac97_write_cache(ac97, 0x70, 0x0100);
snd_ac97_write_cache(ac97, 0x72, 0x0020);
/* Special exception for ASUS W1000/CMI9739. It does not have an SPDIF in. */
if (ac97->pci &&
ac97->subsystem_vendor == 0x1043 &&
ac97->subsystem_device == 0x1843) {
snd_ac97_write_cache(ac97, AC97_CM9739_SPDIF_CTRL,
snd_ac97_read(ac97, AC97_CM9739_SPDIF_CTRL) & ~0x01);
snd_ac97_write_cache(ac97, AC97_CM9739_MULTI_CHAN,
snd_ac97_read(ac97, AC97_CM9739_MULTI_CHAN) | (1 << 14));
}
return 0;
}
#define AC97_CM9761_MULTI_CHAN 0x64
#define AC97_CM9761_FUNC 0x66
#define AC97_CM9761_SPDIF_CTRL 0x6c
static void cm9761_update_jacks(struct snd_ac97 *ac97)
{
/* FIXME: check the bits for each model
* model 83 is confirmed to work
*/
static unsigned short surr_on[3][2] = {
{ 0x0008, 0x0000 }, /* 9761-78 & 82 */
{ 0x0000, 0x0008 }, /* 9761-82 rev.B */
{ 0x0000, 0x0008 }, /* 9761-83 */
};
static unsigned short clfe_on[3][2] = {
{ 0x0000, 0x1000 }, /* 9761-78 & 82 */
{ 0x1000, 0x0000 }, /* 9761-82 rev.B */
{ 0x0000, 0x1000 }, /* 9761-83 */
};
static unsigned short surr_shared[3][2] = {
{ 0x0000, 0x0400 }, /* 9761-78 & 82 */
{ 0x0000, 0x0400 }, /* 9761-82 rev.B */
{ 0x0000, 0x0400 }, /* 9761-83 */
};
static unsigned short clfe_shared[3][2] = {
{ 0x2000, 0x0880 }, /* 9761-78 & 82 */
{ 0x0000, 0x2880 }, /* 9761-82 rev.B */
{ 0x2000, 0x0800 }, /* 9761-83 */
};
unsigned short val = 0;
val |= surr_on[ac97->spec.dev_flags][is_surround_on(ac97)];
val |= clfe_on[ac97->spec.dev_flags][is_clfe_on(ac97)];
val |= surr_shared[ac97->spec.dev_flags][is_shared_surrout(ac97)];
val |= clfe_shared[ac97->spec.dev_flags][is_shared_clfeout(ac97)];
snd_ac97_update_bits(ac97, AC97_CM9761_MULTI_CHAN, 0x3c88, val);
}
static const struct snd_kcontrol_new snd_ac97_cm9761_controls[] = {
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
};
static int cm9761_spdif_out_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
static char *texts[] = { "AC-Link", "ADC", "SPDIF-In" };
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
uinfo->value.enumerated.items = 3;
if (uinfo->value.enumerated.item > 2)
uinfo->value.enumerated.item = 2;
strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
return 0;
}
static int cm9761_spdif_out_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
if (ac97->regs[AC97_CM9761_FUNC] & 0x1)
ucontrol->value.enumerated.item[0] = 2; /* SPDIF-loopback */
else if (ac97->regs[AC97_CM9761_SPDIF_CTRL] & 0x2)
ucontrol->value.enumerated.item[0] = 1; /* ADC loopback */
else
ucontrol->value.enumerated.item[0] = 0; /* AC-link */
return 0;
}
static int cm9761_spdif_out_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
if (ucontrol->value.enumerated.item[0] == 2)
return snd_ac97_update_bits(ac97, AC97_CM9761_FUNC, 0x1, 0x1);
snd_ac97_update_bits(ac97, AC97_CM9761_FUNC, 0x1, 0);
return snd_ac97_update_bits(ac97, AC97_CM9761_SPDIF_CTRL, 0x2,
ucontrol->value.enumerated.item[0] == 1 ? 0x2 : 0);
}
static const char *cm9761_dac_clock[] = { "AC-Link", "SPDIF-In", "Both" };
static const struct ac97_enum cm9761_dac_clock_enum =
AC97_ENUM_SINGLE(AC97_CM9761_SPDIF_CTRL, 9, 3, cm9761_dac_clock);
static const struct snd_kcontrol_new snd_ac97_cm9761_controls_spdif[] = {
{ /* BIT 1: SPDIFS */
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
.info = cm9761_spdif_out_source_info,
.get = cm9761_spdif_out_source_get,
.put = cm9761_spdif_out_source_put,
},
/* BIT 2: IG_SPIV */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,NONE) "Valid Switch", AC97_CM9761_SPDIF_CTRL, 2, 1, 0),
/* BIT 3: SPI2F */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,NONE) "Monitor", AC97_CM9761_SPDIF_CTRL, 3, 1, 0),
/* BIT 4: SPI2SDI */
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_CM9761_SPDIF_CTRL, 4, 1, 0),
/* BIT 9-10: DAC_CTL */
AC97_ENUM("DAC Clock Source", cm9761_dac_clock_enum),
};
static int patch_cm9761_post_spdif(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, snd_ac97_cm9761_controls_spdif, ARRAY_SIZE(snd_ac97_cm9761_controls_spdif));
}
static int patch_cm9761_specific(struct snd_ac97 * ac97)
{
return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
}
static const struct snd_ac97_build_ops patch_cm9761_ops = {
.build_specific = patch_cm9761_specific,
.build_post_spdif = patch_cm9761_post_spdif,
.update_jacks = cm9761_update_jacks
};
static int patch_cm9761(struct snd_ac97 *ac97)
{
unsigned short val;
/* CM9761 has no PCM volume although the register reacts */
/* Master volume seems to have _some_ influence on the analog
* input sounds
*/
ac97->flags |= /*AC97_HAS_NO_MASTER_VOL |*/ AC97_HAS_NO_PCM_VOL;
snd_ac97_write_cache(ac97, AC97_MASTER, 0x8808);
snd_ac97_write_cache(ac97, AC97_PCM, 0x8808);
ac97->spec.dev_flags = 0; /* 1 = model 82 revision B, 2 = model 83 */
if (ac97->id == AC97_ID_CM9761_82) {
unsigned short tmp;
/* check page 1, reg 0x60 */
val = snd_ac97_read(ac97, AC97_INT_PAGING);
snd_ac97_write_cache(ac97, AC97_INT_PAGING, (val & ~0x0f) | 0x01);
tmp = snd_ac97_read(ac97, 0x60);
ac97->spec.dev_flags = tmp & 1; /* revision B? */
snd_ac97_write_cache(ac97, AC97_INT_PAGING, val);
} else if (ac97->id == AC97_ID_CM9761_83)
ac97->spec.dev_flags = 2;
ac97->build_ops = &patch_cm9761_ops;
/* enable spdif */
/* force the SPDIF bit in ext_id - codec doesn't set this bit! */
ac97->ext_id |= AC97_EI_SPDIF;
/* to be sure: we overwrite the ext status bits */
snd_ac97_write_cache(ac97, AC97_EXTENDED_STATUS, 0x05c0);
/* Don't set 0x0200 here; it results in silent analog output */
snd_ac97_write_cache(ac97, AC97_CM9761_SPDIF_CTRL, 0x0001); /* enable spdif-in */
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000; /* 48k only */
/* set-up multi channel */
/* bit 15: pc master beep off
* bit 14: pin47 = EAPD/SPDIF
* bit 13: vref ctl [= cm9739]
* bit 12: CLFE control (reverted on rev B)
* bit 11: Mic/center share (reverted on rev B)
* bit 10: surround/line share
* bit 9: Analog-in mix -> surround
* bit 8: Analog-in mix -> CLFE
* bit 7: Mic/LFE share (mic/center/lfe)
* bit 5: vref select (9761A)
* bit 4: front control
* bit 3: surround control (reverted with rev B)
* bit 2: front mic
* bit 1: stereo mic
* bit 0: mic boost level (0=20dB, 1=30dB)
*/
#if 0
if (ac97->spec.dev_flags)
val = 0x0214;
else
val = 0x321c;
#endif
val = snd_ac97_read(ac97, AC97_CM9761_MULTI_CHAN);
val |= (1 << 4); /* front on */
snd_ac97_write_cache(ac97, AC97_CM9761_MULTI_CHAN, val);
/* FIXME: set up GPIO */
snd_ac97_write_cache(ac97, 0x70, 0x0100);
snd_ac97_write_cache(ac97, 0x72, 0x0020);
return 0;
}
#define AC97_CM9780_SIDE 0x60
#define AC97_CM9780_JACK 0x62
#define AC97_CM9780_MIXER 0x64
#define AC97_CM9780_MULTI_CHAN 0x66
#define AC97_CM9780_SPDIF 0x6c
static const char *cm9780_ch_select[] = { "Front", "Side", "Center/LFE", "Rear" };
static const struct ac97_enum cm9780_ch_select_enum =
AC97_ENUM_SINGLE(AC97_CM9780_MULTI_CHAN, 6, 4, cm9780_ch_select);
static const struct snd_kcontrol_new cm9780_controls[] = {
AC97_DOUBLE("Side Playback Switch", AC97_CM9780_SIDE, 15, 7, 1, 1),
AC97_DOUBLE("Side Playback Volume", AC97_CM9780_SIDE, 8, 0, 31, 0),
AC97_ENUM("Side Playback Route", cm9780_ch_select_enum),
};
static int patch_cm9780_specific(struct snd_ac97 *ac97)
{
return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
}
static const struct snd_ac97_build_ops patch_cm9780_ops = {
.build_specific = patch_cm9780_specific,
.build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
};
static int patch_cm9780(struct snd_ac97 *ac97)
{
unsigned short val;
ac97->build_ops = &patch_cm9780_ops;
/* enable spdif */
if (ac97->ext_id & AC97_EI_SPDIF) {
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_48000; /* 48k only */
val = snd_ac97_read(ac97, AC97_CM9780_SPDIF);
val |= 0x1; /* SPDI_EN */
snd_ac97_write_cache(ac97, AC97_CM9780_SPDIF, val);
}
return 0;
}
/*
* VIA VT1616 codec
*/
static const struct snd_kcontrol_new snd_ac97_controls_vt1616[] = {
AC97_SINGLE("DC Offset removal", 0x5a, 10, 1, 0),
AC97_SINGLE("Alternate Level to Surround Out", 0x5a, 15, 1, 0),
AC97_SINGLE("Downmix LFE and Center to Front", 0x5a, 12, 1, 0),
AC97_SINGLE("Downmix Surround to Front", 0x5a, 11, 1, 0),
};
static const char *slave_vols_vt1616[] = {
"Front Playback Volume",
"Surround Playback Volume",
"Center Playback Volume",
"LFE Playback Volume",
NULL
};
static const char *slave_sws_vt1616[] = {
"Front Playback Switch",
"Surround Playback Switch",
"Center Playback Switch",
"LFE Playback Switch",
NULL
};
/* find a mixer control element with the given name */
static struct snd_kcontrol *snd_ac97_find_mixer_ctl(struct snd_ac97 *ac97,
const char *name)
{
struct snd_ctl_elem_id id;
memset(&id, 0, sizeof(id));
id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
strcpy(id.name, name);
return snd_ctl_find_id(ac97->bus->card, &id);
}
/* create a virtual master control and add slaves */
static int snd_ac97_add_vmaster(struct snd_ac97 *ac97, char *name,
const unsigned int *tlv, const char **slaves)
{
struct snd_kcontrol *kctl;
const char **s;
int err;
kctl = snd_ctl_make_virtual_master(name, tlv);
if (!kctl)
return -ENOMEM;
err = snd_ctl_add(ac97->bus->card, kctl);
if (err < 0)
return err;
for (s = slaves; *s; s++) {
struct snd_kcontrol *sctl;
sctl = snd_ac97_find_mixer_ctl(ac97, *s);
if (!sctl) {
snd_printdd("Cannot find slave %s, skipped\n", *s);
continue;
}
err = snd_ctl_add_slave(kctl, sctl);
if (err < 0)
return err;
}
return 0;
}
static int patch_vt1616_specific(struct snd_ac97 * ac97)
{
struct snd_kcontrol *kctl;
int err;
if (snd_ac97_try_bit(ac97, 0x5a, 9))
if ((err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[0], 1)) < 0)
return err;
if ((err = patch_build_controls(ac97, &snd_ac97_controls_vt1616[1], ARRAY_SIZE(snd_ac97_controls_vt1616) - 1)) < 0)
return err;
/* There is already a misnamed master switch. Rename it. */
kctl = snd_ac97_find_mixer_ctl(ac97, "Master Playback Volume");
if (!kctl)
return -EINVAL;
snd_ac97_rename_vol_ctl(ac97, "Master Playback", "Front Playback");
err = snd_ac97_add_vmaster(ac97, "Master Playback Volume",
kctl->tlv.p, slave_vols_vt1616);
if (err < 0)
return err;
err = snd_ac97_add_vmaster(ac97, "Master Playback Switch",
NULL, slave_sws_vt1616);
if (err < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_vt1616_ops = {
.build_specific = patch_vt1616_specific
};
static int patch_vt1616(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_vt1616_ops;
return 0;
}
/*
* VT1617A codec
*/
/*
* unfortunately, the vt1617a stashes the twiddlers required for
* noodling the i/o jacks on 2 different regs, which means that we can't
* use the easy way provided by AC97_ENUM_DOUBLE(); we have to write
* our own funcs.
*
* NB: this is absolutely and utterly different from the vt1618. dunno
* about the 1616.
*/
/* copied from ac97_surround_jack_mode_info() */
static int snd_ac97_vt1617a_smart51_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
/* ordering in this list reflects the vt1617a docs for Reg 20 and
* 7a and Table 6, which lays out the matrix. NB WRT Table 6: SM51
* is SM51EN *AND* it's Bit 14, not Bit 15, so the table is very
* counter-intuitive */
static const char* texts[] = { "LineIn Mic1", "LineIn Mic1 Mic3",
"Surr LFE/C Mic3", "LineIn LFE/C Mic3",
"LineIn Mic2", "LineIn Mic2 Mic1",
"Surr LFE Mic1", "Surr LFE Mic1 Mic2"};
return ac97_enum_text_info(kcontrol, uinfo, texts, 8);
}
static int snd_ac97_vt1617a_smart51_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ushort usSM51, usMS;
struct snd_ac97 *pac97;
pac97 = snd_kcontrol_chip(kcontrol); /* grab codec handle */
/* grab our desired bits, then mash them together in a manner
* consistent with Table 6 on page 17 in the 1617a docs */
usSM51 = snd_ac97_read(pac97, 0x7a) >> 14;
usMS = snd_ac97_read(pac97, 0x20) >> 8;
ucontrol->value.enumerated.item[0] = (usSM51 << 1) + usMS;
return 0;
}
static int snd_ac97_vt1617a_smart51_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ushort usSM51, usMS, usReg;
struct snd_ac97 *pac97;
pac97 = snd_kcontrol_chip(kcontrol); /* grab codec handle */
usSM51 = ucontrol->value.enumerated.item[0] >> 1;
usMS = ucontrol->value.enumerated.item[0] & 1;
/* push our values into the register - consider that things will be left
* in a funky state if the write fails */
usReg = snd_ac97_read(pac97, 0x7a);
snd_ac97_write_cache(pac97, 0x7a, (usReg & 0x3FFF) + (usSM51 << 14));
usReg = snd_ac97_read(pac97, 0x20);
snd_ac97_write_cache(pac97, 0x20, (usReg & 0xFEFF) + (usMS << 8));
return 0;
}
static const struct snd_kcontrol_new snd_ac97_controls_vt1617a[] = {
AC97_SINGLE("Center/LFE Exchange", 0x5a, 8, 1, 0),
/*
* These are used to enable/disable surround sound on motherboards
* that have 3 bidirectional analog jacks
*/
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Smart 5.1 Select",
.info = snd_ac97_vt1617a_smart51_info,
.get = snd_ac97_vt1617a_smart51_get,
.put = snd_ac97_vt1617a_smart51_put,
},
};
static int patch_vt1617a(struct snd_ac97 * ac97)
{
int err = 0;
int val;
/* we choose to not fail out at this point, but we tell the
caller when we return */
err = patch_build_controls(ac97, &snd_ac97_controls_vt1617a[0],
ARRAY_SIZE(snd_ac97_controls_vt1617a));
/* bring analog power consumption to normal by turning off the
* headphone amplifier, like WinXP driver for EPIA SP
*/
/* We need to check the bit before writing it.
* On some (many?) hardware, setting the bit actually clears it!
*/
val = snd_ac97_read(ac97, 0x5c);
if (!(val & 0x20))
snd_ac97_write_cache(ac97, 0x5c, 0x20);
ac97->ext_id |= AC97_EI_SPDIF; /* force the detection of spdif */
ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
ac97->build_ops = &patch_vt1616_ops;
return err;
}
/* VIA VT1618 8 CHANNEL AC97 CODEC
*
* VIA implements 'Smart 5.1' completely differently on the 1618 than
* it does on the 1617a. awesome! They seem to have sourced this
* particular revision of the technology from somebody else; it's
* called Universal Audio Jack and it shows up on some other folks' chips
* as well.
*
* ordering in this list reflects vt1618 docs for Reg 60h and
* the block diagram; DACs are as follows:
*
* OUT_0 -> Front,
* OUT_1 -> Surround,
* OUT_2 -> C/LFE
*
* Unlike the 1617a, each OUT has a consistent set of mappings
* for all bitpatterns other than 00:
*
* 01 Unmixed Output
* 10 Line In
* 11 Mic In
*
* Special Case of 00:
*
* OUT_0 Mixed Output
* OUT_1 Reserved
* OUT_2 Reserved
*
* I have no idea what the hell Reserved does, but on an MSI
* CN700T, I have to set it to get 5.1 output - YMMV, bad
* shit may happen.
*
* If other chips use Universal Audio Jack, then this code might be applicable
* to them.
*/
struct vt1618_uaj_item {
unsigned short mask;
unsigned short shift;
const char *items[4];
};
/* This list reflects the vt1618 docs for Vendor Defined Register 0x60. */
static struct vt1618_uaj_item vt1618_uaj[3] = {
{
/* speaker jack */
.mask = 0x03,
.shift = 0,
.items = {
"Speaker Out", "DAC Unmixed Out", "Line In", "Mic In"
}
},
{
/* line jack */
.mask = 0x0c,
.shift = 2,
.items = {
"Surround Out", "DAC Unmixed Out", "Line In", "Mic In"
}
},
{
/* mic jack */
.mask = 0x30,
.shift = 4,
.items = {
"Center LFE Out", "DAC Unmixed Out", "Line In", "Mic In"
},
},
};
static int snd_ac97_vt1618_UAJ_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
return ac97_enum_text_info(kcontrol, uinfo,
vt1618_uaj[kcontrol->private_value].items,
4);
}
/* All of the vt1618 Universal Audio Jack twiddlers are on
* Vendor Defined Register 0x60, page 0. The bits, and thus
* the mask, are the only thing that changes
*/
static int snd_ac97_vt1618_UAJ_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
unsigned short datpag, uaj;
struct snd_ac97 *pac97 = snd_kcontrol_chip(kcontrol);
mutex_lock(&pac97->page_mutex);
datpag = snd_ac97_read(pac97, AC97_INT_PAGING) & AC97_PAGE_MASK;
snd_ac97_update_bits(pac97, AC97_INT_PAGING, AC97_PAGE_MASK, 0);
uaj = snd_ac97_read(pac97, 0x60) &
vt1618_uaj[kcontrol->private_value].mask;
snd_ac97_update_bits(pac97, AC97_INT_PAGING, AC97_PAGE_MASK, datpag);
mutex_unlock(&pac97->page_mutex);
ucontrol->value.enumerated.item[0] = uaj >>
vt1618_uaj[kcontrol->private_value].shift;
return 0;
}
static int snd_ac97_vt1618_UAJ_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
return ac97_update_bits_page(snd_kcontrol_chip(kcontrol), 0x60,
vt1618_uaj[kcontrol->private_value].mask,
ucontrol->value.enumerated.item[0]<<
vt1618_uaj[kcontrol->private_value].shift,
0);
}
/* config aux in jack - not found on 3 jack motherboards or soundcards */
static int snd_ac97_vt1618_aux_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
static const char *txt_aux[] = {"Aux In", "Back Surr Out"};
return ac97_enum_text_info(kcontrol, uinfo, txt_aux, 2);
}
static int snd_ac97_vt1618_aux_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] =
(snd_ac97_read(snd_kcontrol_chip(kcontrol), 0x5c) & 0x0008)>>3;
return 0;
}
static int snd_ac97_vt1618_aux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
/* toggle surround rear dac power */
snd_ac97_update_bits(snd_kcontrol_chip(kcontrol), 0x5c, 0x0008,
ucontrol->value.enumerated.item[0] << 3);
/* toggle aux in surround rear out jack */
return snd_ac97_update_bits(snd_kcontrol_chip(kcontrol), 0x76, 0x0008,
ucontrol->value.enumerated.item[0] << 3);
}
static const struct snd_kcontrol_new snd_ac97_controls_vt1618[] = {
AC97_SINGLE("Exchange Center/LFE", 0x5a, 8, 1, 0),
AC97_SINGLE("DC Offset", 0x5a, 10, 1, 0),
AC97_SINGLE("Soft Mute", 0x5c, 0, 1, 1),
AC97_SINGLE("Headphone Amp", 0x5c, 5, 1, 1),
AC97_DOUBLE("Back Surr Volume", 0x5e, 8, 0, 31, 1),
AC97_SINGLE("Back Surr Switch", 0x5e, 15, 1, 1),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Jack Mode",
.info = snd_ac97_vt1618_UAJ_info,
.get = snd_ac97_vt1618_UAJ_get,
.put = snd_ac97_vt1618_UAJ_put,
.private_value = 0
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Jack Mode",
.info = snd_ac97_vt1618_UAJ_info,
.get = snd_ac97_vt1618_UAJ_get,
.put = snd_ac97_vt1618_UAJ_put,
.private_value = 1
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Jack Mode",
.info = snd_ac97_vt1618_UAJ_info,
.get = snd_ac97_vt1618_UAJ_get,
.put = snd_ac97_vt1618_UAJ_put,
.private_value = 2
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Aux Jack Mode",
.info = snd_ac97_vt1618_aux_info,
.get = snd_ac97_vt1618_aux_get,
.put = snd_ac97_vt1618_aux_put,
}
};
static int patch_vt1618(struct snd_ac97 *ac97)
{
return patch_build_controls(ac97, snd_ac97_controls_vt1618,
ARRAY_SIZE(snd_ac97_controls_vt1618));
}
/*
*/
static void it2646_update_jacks(struct snd_ac97 *ac97)
{
/* shared Line-In / Surround Out */
snd_ac97_update_bits(ac97, 0x76, 1 << 9,
is_shared_surrout(ac97) ? (1<<9) : 0);
/* shared Mic / Center/LFE Out */
snd_ac97_update_bits(ac97, 0x76, 1 << 10,
is_shared_clfeout(ac97) ? (1<<10) : 0);
}
static const struct snd_kcontrol_new snd_ac97_controls_it2646[] = {
AC97_SURROUND_JACK_MODE_CTL,
AC97_CHANNEL_MODE_CTL,
};
static const struct snd_kcontrol_new snd_ac97_spdif_controls_it2646[] = {
AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0x76, 11, 1, 0),
AC97_SINGLE("Analog to IEC958 Output", 0x76, 12, 1, 0),
AC97_SINGLE("IEC958 Input Monitor", 0x76, 13, 1, 0),
};
static int patch_it2646_specific(struct snd_ac97 * ac97)
{
int err;
if ((err = patch_build_controls(ac97, snd_ac97_controls_it2646, ARRAY_SIZE(snd_ac97_controls_it2646))) < 0)
return err;
if ((err = patch_build_controls(ac97, snd_ac97_spdif_controls_it2646, ARRAY_SIZE(snd_ac97_spdif_controls_it2646))) < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_it2646_ops = {
.build_specific = patch_it2646_specific,
.update_jacks = it2646_update_jacks
};
static int patch_it2646(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_it2646_ops;
/* full DAC volume */
snd_ac97_write_cache(ac97, 0x5E, 0x0808);
snd_ac97_write_cache(ac97, 0x7A, 0x0808);
return 0;
}
/*
* Si3036 codec
*/
#define AC97_SI3036_CHIP_ID 0x5a
#define AC97_SI3036_LINE_CFG 0x5c
static const struct snd_kcontrol_new snd_ac97_controls_si3036[] = {
AC97_DOUBLE("Modem Speaker Volume", 0x5c, 14, 12, 3, 1)
};
static int patch_si3036_specific(struct snd_ac97 * ac97)
{
int idx, err;
for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_si3036); idx++)
if ((err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_si3036[idx], ac97))) < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_si3036_ops = {
.build_specific = patch_si3036_specific,
};
static int mpatch_si3036(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_si3036_ops;
snd_ac97_write_cache(ac97, 0x5c, 0xf210 );
snd_ac97_write_cache(ac97, 0x68, 0);
return 0;
}
/*
* LM 4550 Codec
*
* We use a static resolution table since LM4550 codec cannot be
* properly autoprobed to determine the resolution via
* check_volume_resolution().
*/
static struct snd_ac97_res_table lm4550_restbl[] = {
{ AC97_MASTER, 0x1f1f },
{ AC97_HEADPHONE, 0x1f1f },
{ AC97_MASTER_MONO, 0x001f },
{ AC97_PC_BEEP, 0x001f }, /* LSB is ignored */
{ AC97_PHONE, 0x001f },
{ AC97_MIC, 0x001f },
{ AC97_LINE, 0x1f1f },
{ AC97_CD, 0x1f1f },
{ AC97_VIDEO, 0x1f1f },
{ AC97_AUX, 0x1f1f },
{ AC97_PCM, 0x1f1f },
{ AC97_REC_GAIN, 0x0f0f },
{ } /* terminator */
};
static int patch_lm4550(struct snd_ac97 *ac97)
{
ac97->res_table = lm4550_restbl;
return 0;
}
/*
* UCB1400 codec (http://www.semiconductors.philips.com/acrobat_download/datasheets/UCB1400-02.pdf)
*/
static const struct snd_kcontrol_new snd_ac97_controls_ucb1400[] = {
/* enable/disable the headphone driver, which allows direct connection to
stereo headphones without the use of external DC blocking
capacitors */
AC97_SINGLE("Headphone Driver", 0x6a, 6, 1, 0),
/* A filter used to compensate for the DC offset is added in the ADC to remove idle
tones from the audio band. */
AC97_SINGLE("DC Filter", 0x6a, 4, 1, 0),
/* Control smart-low-power mode feature. Allows automatic power down
of unused blocks in the ADC analog front end and the PLL. */
AC97_SINGLE("Smart Low Power Mode", 0x6c, 4, 3, 0),
};
static int patch_ucb1400_specific(struct snd_ac97 * ac97)
{
int idx, err;
for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_ucb1400); idx++)
if ((err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_ucb1400[idx], ac97))) < 0)
return err;
return 0;
}
static const struct snd_ac97_build_ops patch_ucb1400_ops = {
.build_specific = patch_ucb1400_specific,
};
static int patch_ucb1400(struct snd_ac97 * ac97)
{
ac97->build_ops = &patch_ucb1400_ops;
/* enable headphone driver and smart low power mode by default */
snd_ac97_write_cache(ac97, 0x6a, 0x0050);
snd_ac97_write_cache(ac97, 0x6c, 0x0030);
return 0;
}
| gpl-2.0 |
jejecule/kernel_despair_find7 | fs/proc/proc_tty.c | 10747 | 4805 | /*
* proc_tty.c -- handles /proc/tty
*
* Copyright 1997, Theodore Ts'o
*/
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
/*
* The /proc/tty directory inodes...
*/
static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver;
/*
* This is the handler for /proc/tty/drivers
*/
static void show_tty_range(struct seq_file *m, struct tty_driver *p,
dev_t from, int num)
{
seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown");
seq_printf(m, "/dev/%-8s ", p->name);
if (p->num > 1) {
seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from),
MINOR(from) + num - 1);
} else {
seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from));
}
switch (p->type) {
case TTY_DRIVER_TYPE_SYSTEM:
seq_puts(m, "system");
if (p->subtype == SYSTEM_TYPE_TTY)
seq_puts(m, ":/dev/tty");
else if (p->subtype == SYSTEM_TYPE_SYSCONS)
seq_puts(m, ":console");
else if (p->subtype == SYSTEM_TYPE_CONSOLE)
seq_puts(m, ":vtmaster");
break;
case TTY_DRIVER_TYPE_CONSOLE:
seq_puts(m, "console");
break;
case TTY_DRIVER_TYPE_SERIAL:
seq_puts(m, "serial");
break;
case TTY_DRIVER_TYPE_PTY:
if (p->subtype == PTY_TYPE_MASTER)
seq_puts(m, "pty:master");
else if (p->subtype == PTY_TYPE_SLAVE)
seq_puts(m, "pty:slave");
else
seq_puts(m, "pty");
break;
default:
seq_printf(m, "type:%d.%d", p->type, p->subtype);
}
seq_putc(m, '\n');
}
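/*
* Illustrative example (not part of the original source): for a serial
* driver registered with driver_name "serial", name "ttyS", major 4 and
* minors 64..67, the format strings above would emit a line roughly like
*
*   serial               /dev/ttyS       4 64-67 serial
*
* the exact column widths follow the %-20s, %-8s and %3d specifiers.
*/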
static int show_tty_driver(struct seq_file *m, void *v)
{
struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers);
dev_t from = MKDEV(p->major, p->minor_start);
dev_t to = from + p->num;
if (&p->tty_drivers == tty_drivers.next) {
/* pseudo-drivers first */
seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
seq_puts(m, "system:/dev/tty\n");
seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
seq_puts(m, "system:console\n");
#ifdef CONFIG_UNIX98_PTYS
seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
seq_puts(m, "system\n");
#endif
#ifdef CONFIG_VT
seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
seq_puts(m, "system:vtmaster\n");
#endif
}
while (MAJOR(from) < MAJOR(to)) {
dev_t next = MKDEV(MAJOR(from)+1, 0);
show_tty_range(m, p, from, next - from);
from = next;
}
if (from != to)
show_tty_range(m, p, from, to - from);
return 0;
}
/* iterator */
static void *t_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&tty_mutex);
return seq_list_start(&tty_drivers, *pos);
}
static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
return seq_list_next(v, &tty_drivers, pos);
}
static void t_stop(struct seq_file *m, void *v)
{
mutex_unlock(&tty_mutex);
}
static const struct seq_operations tty_drivers_op = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = show_tty_driver
};
static int tty_drivers_open(struct inode *inode, struct file *file)
{
return seq_open(file, &tty_drivers_op);
}
static const struct file_operations proc_tty_drivers_operations = {
.open = tty_drivers_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* This function is called by tty_register_driver() to handle
* registering the driver's /proc handler into /proc/tty/driver/<foo>
*/
void proc_tty_register_driver(struct tty_driver *driver)
{
struct proc_dir_entry *ent;
if (!driver->driver_name || driver->proc_entry ||
!driver->ops->proc_fops)
return;
ent = proc_create_data(driver->driver_name, 0, proc_tty_driver,
driver->ops->proc_fops, driver);
driver->proc_entry = ent;
}
/*
* This function is called by tty_unregister_driver()
*/
void proc_tty_unregister_driver(struct tty_driver *driver)
{
struct proc_dir_entry *ent;
ent = driver->proc_entry;
if (!ent)
return;
remove_proc_entry(driver->driver_name, proc_tty_driver);
driver->proc_entry = NULL;
}
/*
* Called by proc_root_init() to initialize the /proc/tty subtree
*/
void __init proc_tty_init(void)
{
if (!proc_mkdir("tty", NULL))
return;
proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
/*
* /proc/tty/driver/serial reveals the exact character counts for
* serial links which is just too easy to abuse for inferring
* password lengths and inter-keystroke timings during password
* entry.
*/
proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
}
| gpl-2.0 |
charles1018/kernel_sony_14.4.A.0.157 | net/ceph/armor.c | 12795 | 1946 |
#include <linux/errno.h>
int ceph_armor(char *dst, const char *src, const char *end);
int ceph_unarmor(char *dst, const char *src, const char *end);
/*
* base64 encode/decode.
*/
static const char *pem_key =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static int encode_bits(int c)
{
return pem_key[c];
}
static int decode_bits(char c)
{
if (c >= 'A' && c <= 'Z')
return c - 'A';
if (c >= 'a' && c <= 'z')
return c - 'a' + 26;
if (c >= '0' && c <= '9')
return c - '0' + 52;
if (c == '+')
return 62;
if (c == '/')
return 63;
if (c == '=')
return 0; /* just non-negative, please */
return -EINVAL;
}
int ceph_armor(char *dst, const char *src, const char *end)
{
int olen = 0;
int line = 0;
while (src < end) {
unsigned char a, b, c;
a = *src++;
*dst++ = encode_bits(a >> 2);
if (src < end) {
b = *src++;
*dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
if (src < end) {
c = *src++;
*dst++ = encode_bits(((b & 15) << 2) |
(c >> 6));
*dst++ = encode_bits(c & 63);
} else {
*dst++ = encode_bits((b & 15) << 2);
*dst++ = '=';
}
} else {
*dst++ = encode_bits(((a & 3) << 4));
*dst++ = '=';
*dst++ = '=';
}
olen += 4;
line += 4;
if (line == 64) {
line = 0;
*(dst++) = '\n';
olen++;
}
}
return olen;
}
int ceph_unarmor(char *dst, const char *src, const char *end)
{
int olen = 0;
while (src < end) {
int a, b, c, d;
if (src[0] == '\n') {
src++;
continue;
}
if (src + 4 > end)
return -EINVAL;
a = decode_bits(src[0]);
b = decode_bits(src[1]);
c = decode_bits(src[2]);
d = decode_bits(src[3]);
if (a < 0 || b < 0 || c < 0 || d < 0)
return -EINVAL;
*dst++ = (a << 2) | (b >> 4);
if (src[2] == '=')
return olen + 1;
*dst++ = ((b & 15) << 4) | (c >> 2);
if (src[3] == '=')
return olen + 2;
*dst++ = ((c & 3) << 6) | d;
olen += 3;
src += 4;
}
return olen;
}
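/*
* Illustrative sketch, not part of the original file: a hypothetical
* round-trip through the two helpers above. The buffer sizes and the
* function name are made up; ceph_armor() expands every 3 input bytes
* into 4 output characters and inserts a '\n' every 64 output characters,
* which ceph_unarmor() skips again.
*/
#if 0
static int ceph_armor_selftest(void)
{
static const char src[] = "base64 round trip";
char enc[64];
char dec[sizeof(src)];
int elen, dlen;
/* encode the 17 payload bytes (excluding the trailing NUL) */
elen = ceph_armor(enc, src, src + sizeof(src) - 1);
if (elen < 0)
return elen;
/* decode again and check that the original bytes come back */
dlen = ceph_unarmor(dec, enc, enc + elen);
if (dlen != sizeof(src) - 1)
return -EINVAL;
return memcmp(dec, src, dlen) ? -EINVAL : 0;
}
#endif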
| gpl-2.0 |
Sudokamikaze/XKernel-taoshan | drivers/video/i810/i810_accel.c | 13563 | 12190 | /*-*- linux-c -*-
* linux/drivers/video/i810_accel.c -- Hardware Acceleration
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
* All Rights Reserved
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fb.h>
#include "i810_regs.h"
#include "i810.h"
#include "i810_main.h"
static u32 i810fb_rop[] = {
COLOR_COPY_ROP, /* ROP_COPY */
XOR_ROP /* ROP_XOR */
};
/* Macros */
#define PUT_RING(n) { \
i810_writel(par->cur_tail, par->iring.virtual, n); \
par->cur_tail += 4; \
par->cur_tail &= RING_SIZE_MASK; \
}
extern void flush_cache(void);
/************************************************************/
/* BLT Engine Routines */
static inline void i810_report_error(u8 __iomem *mmio)
{
printk("IIR : 0x%04x\n"
"EIR : 0x%04x\n"
"PGTBL_ER: 0x%04x\n"
"IPEIR : 0x%04x\n"
"IPEHR : 0x%04x\n",
i810_readw(IIR, mmio),
i810_readb(EIR, mmio),
i810_readl(PGTBL_ER, mmio),
i810_readl(IPEIR, mmio),
i810_readl(IPEHR, mmio));
}
/**
* wait_for_space - check ring buffer free space
* @space: amount of ringbuffer space needed in bytes
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* The function waits until enough free space is available
* in the ringbuffer
*/
static inline int wait_for_space(struct fb_info *info, u32 space)
{
struct i810fb_par *par = info->par;
u32 head, count = WAIT_COUNT, tail;
u8 __iomem *mmio = par->mmio_start_virtual;
tail = par->cur_tail;
while (count--) {
head = i810_readl(IRING + 4, mmio) & RBUFFER_HEAD_MASK;
if ((tail == head) ||
(tail > head &&
(par->iring.size - tail + head) >= space) ||
(tail < head && (head - tail) >= space)) {
return 0;
}
}
printk("ringbuffer lockup!!!\n");
i810_report_error(mmio);
par->dev_flags |= LOCKUP;
info->pixmap.scan_align = 1;
return 1;
}
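/*
* Worked example (illustrative figures, not from the original source):
* with iring.size = 0x10000 bytes, tail = 0xF000 and head = 0x1000 the
* ring has wrapped, so the free space checked above is
* size - tail + head = 0x10000 - 0xF000 + 0x1000 = 0x2000 bytes; any
* request up to that amount returns 0 immediately.
*/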
/**
* wait_for_engine_idle - waits for all hardware engines to finish
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* This waits for lring(0), iring(1), and batch(3), etc. to finish and
* waits until the ringbuffer is empty.
*/
static inline int wait_for_engine_idle(struct fb_info *info)
{
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
int count = WAIT_COUNT;
if (wait_for_space(info, par->iring.size)) /* flush */
return 1;
while((i810_readw(INSTDONE, mmio) & 0x7B) != 0x7B && --count);
if (count) return 0;
printk("accel engine lockup!!!\n");
printk("INSTDONE: 0x%04x\n", i810_readl(INSTDONE, mmio));
i810_report_error(mmio);
par->dev_flags |= LOCKUP;
info->pixmap.scan_align = 1;
return 1;
}
/* begin_iring - prepares the ringbuffer
* @space: length of sequence in bytes
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* Checks/waits for sufficient space in the ringbuffer.
* Returns nonzero if the wait failed (engine lockup), zero otherwise.
*/
static inline u32 begin_iring(struct fb_info *info, u32 space)
{
struct i810fb_par *par = info->par;
if (par->dev_flags & ALWAYS_SYNC)
wait_for_engine_idle(info);
return wait_for_space(info, space);
}
/**
* end_iring - advances the buffer
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* This advances the tail of the ringbuffer, effectively
* beginning the execution of the graphics instruction sequence.
*/
static inline void end_iring(struct i810fb_par *par)
{
u8 __iomem *mmio = par->mmio_start_virtual;
i810_writel(IRING, mmio, par->cur_tail);
}
/**
* source_copy_blit - BLIT transfer operation
* @dwidth: width of rectangular graphics data
* @dheight: height of rectangular graphics data
* @dpitch: bytes per line of destination buffer
* @xdir: direction of copy (left to right or right to left)
* @src: address of first pixel to read from
* @dest: address of first pixel to write to
* @rop: raster operation
* @blit_bpp: pixel format which can be different from the
* framebuffer's pixelformat
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* This is a BLIT operation typically used when doing
* a 'Copy and Paste'
*/
static inline void source_copy_blit(int dwidth, int dheight, int dpitch,
int xdir, int src, int dest, int rop,
int blit_bpp, struct fb_info *info)
{
struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + IRING_PAD)) return;
PUT_RING(BLIT | SOURCE_COPY_BLIT | 4);
PUT_RING(xdir | rop << 16 | dpitch | DYN_COLOR_EN | blit_bpp);
PUT_RING(dheight << 16 | dwidth);
PUT_RING(dest);
PUT_RING(dpitch);
PUT_RING(src);
end_iring(par);
}
/**
* color_blit - solid color BLIT operation
* @width: width of destination
* @height: height of destination
* @pitch: pixels per line of the buffer
* @dest: address of first pixel to write to
* @rop: raster operation
* @what: color to transfer
* @blit_bpp: pixel format which can be different from the
* framebuffer's pixelformat
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* A BLIT operation which can be used for color fill/rectangular fill
*/
static inline void color_blit(int width, int height, int pitch, int dest,
int rop, int what, int blit_bpp,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + IRING_PAD)) return;
PUT_RING(BLIT | COLOR_BLT | 3);
PUT_RING(rop << 16 | pitch | SOLIDPATTERN | DYN_COLOR_EN | blit_bpp);
PUT_RING(height << 16 | width);
PUT_RING(dest);
PUT_RING(what);
PUT_RING(NOP);
end_iring(par);
}
/**
* mono_src_copy_imm_blit - color expand from system memory to framebuffer
* @dwidth: width of destination
* @dheight: height of destination
* @dpitch: pixels per line of the buffer
* @dsize: size of bitmap in double words
* @dest: address of first byte of pixel;
* @rop: raster operation
* @blit_bpp: pixelformat to use which can be different from the
* framebuffer's pixelformat
* @src: address of image data
* @bg: background color
* @fg: foreground color
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* A color expand operation where the source data is placed in the
* ringbuffer itself. Useful for drawing text.
*
* REQUIREMENT:
* The end of a scanline must be padded to the next word.
*/
static inline void mono_src_copy_imm_blit(int dwidth, int dheight, int dpitch,
int dsize, int blit_bpp, int rop,
int dest, const u32 *src, int bg,
int fg, struct fb_info *info)
{
struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + (dsize << 2) + IRING_PAD)) return;
PUT_RING(BLIT | MONO_SOURCE_COPY_IMMEDIATE | (4 + dsize));
PUT_RING(DYN_COLOR_EN | blit_bpp | rop << 16 | dpitch);
PUT_RING(dheight << 16 | dwidth);
PUT_RING(dest);
PUT_RING(bg);
PUT_RING(fg);
while (dsize--)
PUT_RING(*src++);
end_iring(par);
}
static inline void load_front(int offset, struct fb_info *info)
{
struct i810fb_par *par = info->par;
if (begin_iring(info, 8 + IRING_PAD)) return;
PUT_RING(PARSER | FLUSH);
PUT_RING(NOP);
end_iring(par);
if (begin_iring(info, 8 + IRING_PAD)) return;
PUT_RING(PARSER | FRONT_BUFFER | ((par->pitch >> 3) << 8));
PUT_RING((par->fb.offset << 12) + offset);
end_iring(par);
}
/**
* i810fb_iring_enable - enables/disables the ringbuffer
* @mode: enable or disable
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* Enables or disables the ringbuffer, effectively enabling or
* disabling the instruction/acceleration engine.
*/
static inline void i810fb_iring_enable(struct i810fb_par *par, u32 mode)
{
u32 tmp;
u8 __iomem *mmio = par->mmio_start_virtual;
tmp = i810_readl(IRING + 12, mmio);
if (mode == OFF)
tmp &= ~1;
else
tmp |= 1;
flush_cache();
i810_writel(IRING + 12, mmio, tmp);
}
void i810fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct i810fb_par *par = info->par;
u32 dx, dy, width, height, dest, rop = 0, color = 0;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
par->depth == 4) {
cfb_fillrect(info, rect);
return;
}
if (par->depth == 1)
color = rect->color;
else
color = ((u32 *) (info->pseudo_palette))[rect->color];
rop = i810fb_rop[rect->rop];
dx = rect->dx * par->depth;
width = rect->width * par->depth;
dy = rect->dy;
height = rect->height;
dest = info->fix.smem_start + (dy * info->fix.line_length) + dx;
color_blit(width, height, info->fix.line_length, dest, rop, color,
par->blit_bpp, info);
}
void i810fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct i810fb_par *par = info->par;
u32 sx, sy, dx, dy, pitch, width, height, src, dest, xdir;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
par->depth == 4) {
cfb_copyarea(info, region);
return;
}
dx = region->dx * par->depth;
sx = region->sx * par->depth;
width = region->width * par->depth;
sy = region->sy;
dy = region->dy;
height = region->height;
if (dx <= sx) {
xdir = INCREMENT;
}
else {
xdir = DECREMENT;
sx += width - 1;
dx += width - 1;
}
if (dy <= sy) {
pitch = info->fix.line_length;
}
else {
pitch = (-(info->fix.line_length)) & 0xFFFF;
sy += height - 1;
dy += height - 1;
}
src = info->fix.smem_start + (sy * info->fix.line_length) + sx;
dest = info->fix.smem_start + (dy * info->fix.line_length) + dx;
source_copy_blit(width, height, pitch, xdir, src, dest,
PAT_COPY_ROP, par->blit_bpp, info);
}
void i810fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct i810fb_par *par = info->par;
u32 fg = 0, bg = 0, size, dst;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
par->depth == 4 || image->depth != 1) {
cfb_imageblit(info, image);
return;
}
switch (info->var.bits_per_pixel) {
case 8:
fg = image->fg_color;
bg = image->bg_color;
break;
case 16:
case 24:
fg = ((u32 *)(info->pseudo_palette))[image->fg_color];
bg = ((u32 *)(info->pseudo_palette))[image->bg_color];
break;
}
dst = info->fix.smem_start + (image->dy * info->fix.line_length) +
(image->dx * par->depth);
size = (image->width+7)/8 + 1;
size &= ~1;
size *= image->height;
size += 7;
size &= ~7;
mono_src_copy_imm_blit(image->width * par->depth,
image->height, info->fix.line_length,
size/4, par->blit_bpp,
PAT_COPY_ROP, dst, (u32 *) image->data,
bg, fg, info);
}
int i810fb_sync(struct fb_info *info)
{
struct i810fb_par *par = info->par;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
return 0;
return wait_for_engine_idle(info);
}
void i810fb_load_front(u32 offset, struct fb_info *info)
{
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
i810_writel(DPLYBASE, mmio, par->fb.physical + offset);
else
load_front(offset, info);
}
/**
* i810fb_init_ringbuffer - initialize the ringbuffer
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
* Initializes the ringbuffer by telling the device the
* size and location of the ringbuffer. It also sets
* the head and tail pointers = 0
*/
void i810fb_init_ringbuffer(struct fb_info *info)
{
struct i810fb_par *par = info->par;
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
wait_for_engine_idle(info);
i810fb_iring_enable(par, OFF);
i810_writel(IRING, mmio, 0);
i810_writel(IRING + 4, mmio, 0);
par->cur_tail = 0;
tmp2 = i810_readl(IRING + 8, mmio) & ~RBUFFER_START_MASK;
tmp1 = par->iring.physical;
i810_writel(IRING + 8, mmio, tmp2 | tmp1);
tmp1 = i810_readl(IRING + 12, mmio);
tmp1 &= ~RBUFFER_SIZE_MASK;
tmp2 = (par->iring.size - I810_PAGESIZE) & RBUFFER_SIZE_MASK;
i810_writel(IRING + 12, mmio, tmp1 | tmp2);
i810fb_iring_enable(par, ON);
}
| gpl-2.0 |
sleshepic/l900_MK4_Kernel | lib/extable.c | 13819 | 2486 | /*
* Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
*
* Copyright (C) 2004 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#ifndef ARCH_HAS_SORT_EXTABLE
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
static int cmp_ex(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
if (x->insn > y->insn)
return 1;
if (x->insn < y->insn)
return -1;
return 0;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex, NULL);
}
#ifdef CONFIG_MODULES
/*
* If the exception table is sorted, any entries referring to the module
* init area will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
/*trim the beginning*/
while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
m->extable++;
m->num_exentries--;
}
/*trim the end*/
while (m->num_exentries &&
within_module_init(m->extable[m->num_exentries-1].insn, m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
#endif /* !ARCH_HAS_SORT_EXTABLE */
#ifndef ARCH_HAS_SEARCH_EXTABLE
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value)
{
while (first <= last) {
const struct exception_table_entry *mid;
mid = ((last - first) >> 1) + first;
/*
* careful, the distance between value and insn
* can be larger than MAX_LONG:
*/
if (mid->insn < value)
first = mid + 1;
else if (mid->insn > value)
last = mid - 1;
else
return mid;
}
return NULL;
}
#endif
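/*
* Illustrative sketch, not part of the original file: how a sorted table
* is consumed. The insn/fixup values and the helper name are made up; a
* real exception table is emitted into the __ex_table section by the
* architecture's fixup macros and sorted with sort_extable() above.
*/
#if 0
static const struct exception_table_entry demo_extable[] = {
{ .insn = 0x1000, .fixup = 0x2000 },
{ .insn = 0x1010, .fixup = 0x2010 },
{ .insn = 0x1020, .fixup = 0x2020 },
};
static const struct exception_table_entry *demo_lookup(unsigned long addr)
{
/* first and last are inclusive bounds, as search_extable() expects */
return search_extable(demo_extable,
&demo_extable[ARRAY_SIZE(demo_extable) - 1],
addr);
}
#endif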
| gpl-2.0 |
mikronac/android_kernel_htc_msm8960 | arch/powerpc/boot/cuboot-hotfoot.c | 13819 | 4113 | /*
* Old U-boot compatibility for Esteem 195E Hotfoot CPU Board
*
* Author: Solomon Peachy <solomon@linux-wlan.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"
#include "cuboot.h"
#define TARGET_4xx
#define TARGET_HOTFOOT
#include "ppcboot-hotfoot.h"
static bd_t bd;
#define NUM_REGS 3
static void hotfoot_fixups(void)
{
u32 uart = mfdcr(DCRN_CPC0_UCR) & 0x7f;
dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_procfreq, 0);
dt_fixup_clock("/plb", bd.bi_plb_busfreq);
dt_fixup_clock("/plb/opb", bd.bi_opbfreq);
dt_fixup_clock("/plb/ebc", bd.bi_pci_busfreq);
dt_fixup_clock("/plb/opb/serial@ef600300", bd.bi_procfreq / uart);
dt_fixup_clock("/plb/opb/serial@ef600400", bd.bi_procfreq / uart);
dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
/* Is this a single eth/serial board? */
if ((bd.bi_enet1addr[0] == 0) &&
(bd.bi_enet1addr[1] == 0) &&
(bd.bi_enet1addr[2] == 0) &&
(bd.bi_enet1addr[3] == 0) &&
(bd.bi_enet1addr[4] == 0) &&
(bd.bi_enet1addr[5] == 0)) {
void *devp;
printf("Trimming devtree for single serial/eth board\n");
devp = finddevice("/plb/opb/serial@ef600300");
if (!devp)
fatal("Can't find node for /plb/opb/serial@ef600300");
del_node(devp);
devp = finddevice("/plb/opb/ethernet@ef600900");
if (!devp)
fatal("Can't find node for /plb/opb/ethernet@ef600900");
del_node(devp);
}
ibm4xx_quiesce_eth((u32 *)0xef600800, (u32 *)0xef600900);
/* Fix up flash size in fdt for 4M boards. */
if (bd.bi_flashsize < 0x800000) {
u32 regs[NUM_REGS];
void *devp = finddevice("/plb/ebc/nor_flash@0");
if (!devp)
fatal("Can't find FDT node for nor_flash!??");
printf("Fixing devtree for 4M Flash\n");
/* First fix up the base address */
getprop(devp, "reg", regs, sizeof(regs));
regs[0] = 0;
regs[1] = 0xffc00000;
regs[2] = 0x00400000;
setprop(devp, "reg", regs, sizeof(regs));
/* Then the offsets */
devp = finddevice("/plb/ebc/nor_flash@0/partition@0");
if (!devp)
fatal("Can't find FDT node for partition@0");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
devp = finddevice("/plb/ebc/nor_flash@0/partition@1");
if (!devp)
fatal("Can't find FDT node for partition@1");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
devp = finddevice("/plb/ebc/nor_flash@0/partition@2");
if (!devp)
fatal("Can't find FDT node for partition@2");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
devp = finddevice("/plb/ebc/nor_flash@0/partition@3");
if (!devp)
fatal("Can't find FDT node for partition@3");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
devp = finddevice("/plb/ebc/nor_flash@0/partition@4");
if (!devp)
fatal("Can't find FDT node for partition@4");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
devp = finddevice("/plb/ebc/nor_flash@0/partition@6");
if (!devp)
fatal("Can't find FDT node for partition@6");
getprop(devp, "reg", regs, 2*sizeof(u32));
regs[0] -= 0x400000;
setprop(devp, "reg", regs, 2*sizeof(u32));
/* Delete the FeatFS node */
devp = finddevice("/plb/ebc/nor_flash@0/partition@5");
if (!devp)
fatal("Can't find FDT node for partition@5");
del_node(devp);
}
}
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
CUBOOT_INIT();
platform_ops.fixups = hotfoot_fixups;
platform_ops.exit = ibm40x_dbcr_reset;
fdt_init(_dtb_start);
serial_console_init();
}
| gpl-2.0 |
Lukas1212/htc7x30-3.0 | drivers/misc/pmic8058-pwm.c | 252 | 26954 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* Qualcomm PMIC8058 PWM driver
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pwm.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/pmic8058-pwm.h>
#define PM8058_LPG_BANKS 8
#define PM8058_PWM_CHANNELS PM8058_LPG_BANKS /* MAX=8 */
#define PM8058_LPG_CTL_REGS 7
/* PMIC8058 LPG/PWM */
#define SSBI_REG_ADDR_LPG_CTL_BASE 0x13C
#define SSBI_REG_ADDR_LPG_CTL(n) (SSBI_REG_ADDR_LPG_CTL_BASE + (n))
#define SSBI_REG_ADDR_LPG_BANK_SEL 0x143
#define SSBI_REG_ADDR_LPG_BANK_EN 0x144
#define SSBI_REG_ADDR_LPG_LUT_CFG0 0x145
#define SSBI_REG_ADDR_LPG_LUT_CFG1 0x146
#define SSBI_REG_ADDR_LPG_TEST 0x147
/* Control 0 */
#define PM8058_PWM_1KHZ_COUNT_MASK 0xF0
#define PM8058_PWM_1KHZ_COUNT_SHIFT 4
#define PM8058_PWM_1KHZ_COUNT_MAX 15
#define PM8058_PWM_OUTPUT_EN 0x08
#define PM8058_PWM_PWM_EN 0x04
#define PM8058_PWM_RAMP_GEN_EN 0x02
#define PM8058_PWM_RAMP_START 0x01
#define PM8058_PWM_PWM_START (PM8058_PWM_OUTPUT_EN \
| PM8058_PWM_PWM_EN)
#define PM8058_PWM_RAMP_GEN_START (PM8058_PWM_RAMP_GEN_EN \
| PM8058_PWM_RAMP_START)
/* Control 1 */
#define PM8058_PWM_REVERSE_EN 0x80
#define PM8058_PWM_BYPASS_LUT 0x40
#define PM8058_PWM_HIGH_INDEX_MASK 0x3F
/* Control 2 */
#define PM8058_PWM_LOOP_EN 0x80
#define PM8058_PWM_RAMP_UP 0x40
#define PM8058_PWM_LOW_INDEX_MASK 0x3F
/* Control 3 */
#define PM8058_PWM_VALUE_BIT7_0 0xFF
#define PM8058_PWM_VALUE_BIT5_0 0x3F
/* Control 4 */
#define PM8058_PWM_VALUE_BIT8 0x80
#define PM8058_PWM_CLK_SEL_MASK 0x60
#define PM8058_PWM_CLK_SEL_SHIFT 5
#define PM8058_PWM_CLK_SEL_NO 0
#define PM8058_PWM_CLK_SEL_1KHZ 1
#define PM8058_PWM_CLK_SEL_32KHZ 2
#define PM8058_PWM_CLK_SEL_19P2MHZ 3
#define PM8058_PWM_PREDIVIDE_MASK 0x18
#define PM8058_PWM_PREDIVIDE_SHIFT 3
#define PM8058_PWM_PREDIVIDE_2 0
#define PM8058_PWM_PREDIVIDE_3 1
#define PM8058_PWM_PREDIVIDE_5 2
#define PM8058_PWM_PREDIVIDE_6 3
#define PM8058_PWM_M_MASK 0x07
#define PM8058_PWM_M_MIN 0
#define PM8058_PWM_M_MAX 7
/* Control 5 */
#define PM8058_PWM_PAUSE_COUNT_HI_MASK 0xFC
#define PM8058_PWM_PAUSE_COUNT_HI_SHIFT 2
#define PM8058_PWM_PAUSE_ENABLE_HIGH 0x02
#define PM8058_PWM_SIZE_9_BIT 0x01
/* Control 6 */
#define PM8058_PWM_PAUSE_COUNT_LO_MASK 0xFC
#define PM8058_PWM_PAUSE_COUNT_LO_SHIFT 2
#define PM8058_PWM_PAUSE_ENABLE_LOW 0x02
#define PM8058_PWM_RESERVED 0x01
#define PM8058_PWM_PAUSE_COUNT_MAX 56 /* < 2^6 = 64*/
/* LUT_CFG1 */
#define PM8058_PWM_LUT_READ 0x40
/* TEST */
#define PM8058_PWM_DTEST_MASK 0x38
#define PM8058_PWM_DTEST_SHIFT 3
#define PM8058_PWM_DTEST_BANK_MASK 0x07
/* PWM frequency support
*
* PWM Frequency = Clock Frequency / (N * T)
* or
* PWM Period = Clock Period * (N * T)
* where
* N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
* T = Pre-divide * 2^m, m = 0..7 (exponent)
*
* We use this formula to figure out m for the best pre-divide and clock:
* (PWM Period / N) / 2^m = (Pre-divide * Clock Period)
*/
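/*
* Illustrative worked example (values assumed, not taken from the datasheet):
* with the 32.768 kHz clock, pre-divide = 2, m = 5 and a 6-bit PWM (N = 64),
* the output frequency is
*
*	32768 Hz / (2 * 2^5 * 64) = 8 Hz
*
* i.e. a period of exactly 125 ms. pm8058_pwm_calc_period() below searches
* the clock / pre-divide / m space for the combination whose N * T product
* comes closest to the requested period.
*/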
#define NUM_CLOCKS 3
#define NSEC_1000HZ (NSEC_PER_SEC / 1000)
#define NSEC_32768HZ (NSEC_PER_SEC / 32768)
#define NSEC_19P2MHZ (NSEC_PER_SEC / 19200000)
#define CLK_PERIOD_MIN NSEC_19P2MHZ
#define CLK_PERIOD_MAX NSEC_1000HZ
#define NUM_PRE_DIVIDE 3 /* No default support for pre-divide = 6 */
#define PRE_DIVIDE_0 2
#define PRE_DIVIDE_1 3
#define PRE_DIVIDE_2 5
#define PRE_DIVIDE_MIN PRE_DIVIDE_0
#define PRE_DIVIDE_MAX PRE_DIVIDE_2
static char *clks[NUM_CLOCKS] = {
"1K", "32768", "19.2M"
};
static unsigned pre_div[NUM_PRE_DIVIDE] = {
PRE_DIVIDE_0, PRE_DIVIDE_1, PRE_DIVIDE_2
};
static unsigned int pt_t[NUM_PRE_DIVIDE][NUM_CLOCKS] = {
{ PRE_DIVIDE_0 * NSEC_1000HZ,
PRE_DIVIDE_0 * NSEC_32768HZ,
PRE_DIVIDE_0 * NSEC_19P2MHZ,
},
{ PRE_DIVIDE_1 * NSEC_1000HZ,
PRE_DIVIDE_1 * NSEC_32768HZ,
PRE_DIVIDE_1 * NSEC_19P2MHZ,
},
{ PRE_DIVIDE_2 * NSEC_1000HZ,
PRE_DIVIDE_2 * NSEC_32768HZ,
PRE_DIVIDE_2 * NSEC_19P2MHZ,
},
};
#define MIN_MPT ((PRE_DIVIDE_MIN * CLK_PERIOD_MIN) << PM8058_PWM_M_MIN)
#define MAX_MPT ((PRE_DIVIDE_MAX * CLK_PERIOD_MAX) << PM8058_PWM_M_MAX)
#define CHAN_LUT_SIZE (PM_PWM_LUT_SIZE / PM8058_PWM_CHANNELS)
/* Private data */
struct pm8058_pwm_chip;
struct pwm_device {
struct device *dev;
int pwm_id; /* = bank/channel id */
int in_use;
const char *label;
struct pm8058_pwm_period period;
int pwm_value;
int pwm_period;
int use_lut; /* Use LUT to output PWM */
u8 pwm_ctl[PM8058_LPG_CTL_REGS];
int irq;
struct pm8058_pwm_chip *chip;
};
struct pm8058_pwm_chip {
struct pwm_device pwm_dev[PM8058_PWM_CHANNELS];
u8 bank_mask;
struct mutex pwm_mutex;
struct pm8058_pwm_pdata *pdata;
};
static struct pm8058_pwm_chip *pwm_chip;
struct pm8058_pwm_lut {
/* LUT parameters */
int lut_duty_ms;
int lut_lo_index;
int lut_hi_index;
int lut_pause_hi;
int lut_pause_lo;
int flags;
};
static u16 duty_msec[PM8058_PWM_1KHZ_COUNT_MAX + 1] = {
0, 1, 2, 3, 4, 6, 8, 16, 18, 24, 32, 36, 64, 128, 256, 512
};
static u16 pause_count[PM8058_PWM_PAUSE_COUNT_MAX + 1] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
23, 28, 31, 42, 47, 56, 63, 83, 94, 111, 125, 167, 188, 222, 250, 333,
375, 500, 667, 750, 800, 900, 1000, 1100,
1200, 1300, 1400, 1500, 1600, 1800, 2000, 2500,
3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500,
7000
};
/* Internal functions */
static void pm8058_pwm_save(u8 *u8p, u8 mask, u8 val)
{
*u8p &= ~mask;
*u8p |= val & mask;
}
static int pm8058_pwm_bank_enable(struct pwm_device *pwm, int enable)
{
int rc;
u8 reg;
struct pm8058_pwm_chip *chip;
chip = pwm->chip;
if (enable)
reg = chip->bank_mask | (1 << pwm->pwm_id);
else
reg = chip->bank_mask & ~(1 << pwm->pwm_id);
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_BANK_EN, reg);
if (rc) {
pr_err("pm8xxx_write(): rc=%d (Enable LPG Bank)\n", rc);
goto bail_out;
}
chip->bank_mask = reg;
bail_out:
return rc;
}
static int pm8058_pwm_bank_sel(struct pwm_device *pwm)
{
int rc;
u8 reg;
reg = pwm->pwm_id;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_BANK_SEL, reg);
if (rc)
pr_err("pm8xxx_write(): rc=%d (Select PWM Bank)\n", rc);
return rc;
}
static int pm8058_pwm_start(struct pwm_device *pwm, int start, int ramp_start)
{
int rc;
u8 reg;
if (start) {
reg = pwm->pwm_ctl[0] | PM8058_PWM_PWM_START;
if (ramp_start)
reg |= PM8058_PWM_RAMP_GEN_START;
else
reg &= ~PM8058_PWM_RAMP_GEN_START;
} else {
reg = pwm->pwm_ctl[0] & ~PM8058_PWM_PWM_START;
reg &= ~PM8058_PWM_RAMP_GEN_START;
}
rc = pm8xxx_writeb(pwm->dev->parent, SSBI_REG_ADDR_LPG_CTL(0),
reg);
if (rc)
pr_err("pm8xxx_write(): rc=%d (Enable PWM Ctl 0)\n", rc);
else
pwm->pwm_ctl[0] = reg;
return rc;
}
static void pm8058_pwm_calc_period(unsigned int period_us,
struct pm8058_pwm_period *period)
{
int n, m, clk, div;
int best_m, best_div, best_clk;
int last_err, cur_err, better_err, better_m;
unsigned int tmp_p, last_p, min_err, period_n;
/* PWM Period / N : handle underflow or overflow */
if (period_us < (PM_PWM_PERIOD_MAX / NSEC_PER_USEC))
period_n = (period_us * NSEC_PER_USEC) >> 6;
else
period_n = (period_us >> 6) * NSEC_PER_USEC;
if (period_n >= MAX_MPT) {
n = 9;
period_n >>= 3;
} else
n = 6;
min_err = MAX_MPT;
best_m = 0;
best_clk = 0;
best_div = 0;
for (clk = 0; clk < NUM_CLOCKS; clk++) {
for (div = 0; div < NUM_PRE_DIVIDE; div++) {
tmp_p = period_n;
last_p = tmp_p;
for (m = 0; m <= PM8058_PWM_M_MAX; m++) {
if (tmp_p <= pt_t[div][clk]) {
/* Found local best */
if (!m) {
better_err = pt_t[div][clk] -
tmp_p;
better_m = m;
} else {
last_err = last_p -
pt_t[div][clk];
cur_err = pt_t[div][clk] -
tmp_p;
if (cur_err < last_err) {
better_err = cur_err;
better_m = m;
} else {
better_err = last_err;
better_m = m - 1;
}
}
if (better_err < min_err) {
min_err = better_err;
best_m = better_m;
best_clk = clk;
best_div = div;
}
break;
} else {
last_p = tmp_p;
tmp_p >>= 1;
}
}
}
}
/* Use higher resolution */
if (best_m >= 3 && n == 6) {
n += 3;
best_m -= 3;
}
period->pwm_size = n;
period->clk = best_clk;
period->pre_div = best_div;
period->pre_div_exp = best_m;
pr_debug("period=%u: n=%d, m=%d, clk[%d]=%s, div[%d]=%d\n",
(unsigned)period_us, n, best_m,
best_clk, clks[best_clk], best_div, pre_div[best_div]);
}
static void pm8058_pwm_calc_pwm_value(struct pwm_device *pwm,
unsigned int period_us,
unsigned int duty_us)
{
unsigned int max_pwm_value, tmp;
/* Figure out pwm_value with overflow handling */
tmp = 1 << (sizeof(tmp) * 8 - pwm->period.pwm_size);
if (duty_us < tmp) {
tmp = duty_us << pwm->period.pwm_size;
pwm->pwm_value = tmp / period_us;
} else {
tmp = period_us >> pwm->period.pwm_size;
pwm->pwm_value = duty_us / tmp;
}
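/*
* Illustrative arithmetic (values assumed): with pwm_size = 9, duty_us = 250
* and period_us = 1000, the first branch above computes
* (250 << 9) / 1000 = 128, i.e. 25% of the 512-step range.
*/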
max_pwm_value = (1 << pwm->period.pwm_size) - 1;
if (pwm->pwm_value > max_pwm_value)
pwm->pwm_value = max_pwm_value;
}
static int pm8058_pwm_change_table(struct pwm_device *pwm, int duty_pct[],
int start_idx, int len, int raw_value)
{
unsigned int pwm_value, max_pwm_value;
u8 cfg0, cfg1;
int i, pwm_size;
int rc = 0;
pwm_size = (pwm->pwm_ctl[5] & PM8058_PWM_SIZE_9_BIT) ? 9 : 6;
max_pwm_value = (1 << pwm_size) - 1;
for (i = 0; i < len; i++) {
if (raw_value)
pwm_value = duty_pct[i];
else
pwm_value = (duty_pct[i] << pwm_size) / 100;
if (pwm_value > max_pwm_value)
pwm_value = max_pwm_value;
cfg0 = pwm_value;
cfg1 = (pwm_value >> 1) & 0x80;
cfg1 |= start_idx + i;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_LUT_CFG0, cfg0);
if (rc)
break;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_LUT_CFG1, cfg1);
if (rc)
break;
}
return rc;
}
static void pm8058_pwm_save_index(struct pwm_device *pwm,
int low_idx, int high_idx, int flags)
{
pwm->pwm_ctl[1] = high_idx & PM8058_PWM_HIGH_INDEX_MASK;
pwm->pwm_ctl[2] = low_idx & PM8058_PWM_LOW_INDEX_MASK;
if (flags & PM_PWM_LUT_REVERSE)
pwm->pwm_ctl[1] |= PM8058_PWM_REVERSE_EN;
if (flags & PM_PWM_LUT_RAMP_UP)
pwm->pwm_ctl[2] |= PM8058_PWM_RAMP_UP;
if (flags & PM_PWM_LUT_LOOP)
pwm->pwm_ctl[2] |= PM8058_PWM_LOOP_EN;
}
static void pm8058_pwm_save_period(struct pwm_device *pwm)
{
u8 mask, val;
val = ((pwm->period.clk + 1) << PM8058_PWM_CLK_SEL_SHIFT)
& PM8058_PWM_CLK_SEL_MASK;
val |= (pwm->period.pre_div << PM8058_PWM_PREDIVIDE_SHIFT)
& PM8058_PWM_PREDIVIDE_MASK;
val |= pwm->period.pre_div_exp & PM8058_PWM_M_MASK;
mask = PM8058_PWM_CLK_SEL_MASK | PM8058_PWM_PREDIVIDE_MASK |
PM8058_PWM_M_MASK;
pm8058_pwm_save(&pwm->pwm_ctl[4], mask, val);
val = (pwm->period.pwm_size > 6) ? PM8058_PWM_SIZE_9_BIT : 0;
mask = PM8058_PWM_SIZE_9_BIT;
pm8058_pwm_save(&pwm->pwm_ctl[5], mask, val);
}
static void pm8058_pwm_save_pwm_value(struct pwm_device *pwm)
{
u8 mask, val;
pwm->pwm_ctl[3] = pwm->pwm_value;
val = (pwm->period.pwm_size > 6) ? (pwm->pwm_value >> 1) : 0;
mask = PM8058_PWM_VALUE_BIT8;
pm8058_pwm_save(&pwm->pwm_ctl[4], mask, val);
}
static void pm8058_pwm_save_duty_time(struct pwm_device *pwm,
struct pm8058_pwm_lut *lut)
{
int i;
u8 mask, val;
/* Linear search for duty time */
for (i = 0; i < PM8058_PWM_1KHZ_COUNT_MAX; i++) {
if (duty_msec[i] >= lut->lut_duty_ms)
break;
}
val = i << PM8058_PWM_1KHZ_COUNT_SHIFT;
mask = PM8058_PWM_1KHZ_COUNT_MASK;
pm8058_pwm_save(&pwm->pwm_ctl[0], mask, val);
}
static void pm8058_pwm_save_pause(struct pwm_device *pwm,
struct pm8058_pwm_lut *lut)
{
int i, pause_cnt, time_cnt;
u8 mask, val;
time_cnt = (pwm->pwm_ctl[0] & PM8058_PWM_1KHZ_COUNT_MASK)
>> PM8058_PWM_1KHZ_COUNT_SHIFT;
if (lut->flags & PM_PWM_LUT_PAUSE_HI_EN) {
pause_cnt = (lut->lut_pause_hi + duty_msec[time_cnt] / 2)
/ duty_msec[time_cnt];
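/*
* Illustrative rounding (values assumed): with a 16 ms duty step and
* lut_pause_hi = 100, pause_cnt = (100 + 8) / 16 = 6; the search below then
* picks pause_count[5] = 6, i.e. a pause of roughly 96 ms.
*/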
/* Linear search for pause time */
for (i = 0; i < PM8058_PWM_PAUSE_COUNT_MAX; i++) {
if (pause_count[i] >= pause_cnt)
break;
}
val = (i << PM8058_PWM_PAUSE_COUNT_HI_SHIFT) &
PM8058_PWM_PAUSE_COUNT_HI_MASK;
val |= PM8058_PWM_PAUSE_ENABLE_HIGH;
} else
val = 0;
mask = PM8058_PWM_PAUSE_COUNT_HI_MASK | PM8058_PWM_PAUSE_ENABLE_HIGH;
pm8058_pwm_save(&pwm->pwm_ctl[5], mask, val);
if (lut->flags & PM_PWM_LUT_PAUSE_LO_EN) {
/* Linear search for pause time */
pause_cnt = (lut->lut_pause_lo + duty_msec[time_cnt] / 2)
/ duty_msec[time_cnt];
for (i = 0; i < PM8058_PWM_PAUSE_COUNT_MAX; i++) {
if (pause_count[i] >= pause_cnt)
break;
}
val = (i << PM8058_PWM_PAUSE_COUNT_LO_SHIFT) &
PM8058_PWM_PAUSE_COUNT_LO_MASK;
val |= PM8058_PWM_PAUSE_ENABLE_LOW;
} else
val = 0;
mask = PM8058_PWM_PAUSE_COUNT_LO_MASK | PM8058_PWM_PAUSE_ENABLE_LOW;
pm8058_pwm_save(&pwm->pwm_ctl[6], mask, val);
}
static int pm8058_pwm_write(struct pwm_device *pwm, int start, int end)
{
int i, rc;
/* Write in reverse order so that register 0 is written last */
for (i = end - 1; i >= start; i--) {
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_CTL(i),
pwm->pwm_ctl[i]);
if (rc) {
pr_err("pm8xxx_write(): rc=%d (PWM Ctl[%d])\n", rc, i);
return rc;
}
}
return 0;
}
static int pm8058_pwm_change_lut(struct pwm_device *pwm,
struct pm8058_pwm_lut *lut)
{
int rc;
pm8058_pwm_save_index(pwm, lut->lut_lo_index,
lut->lut_hi_index, lut->flags);
pm8058_pwm_save_duty_time(pwm, lut);
pm8058_pwm_save_pause(pwm, lut);
pm8058_pwm_save(&pwm->pwm_ctl[1], PM8058_PWM_BYPASS_LUT, 0);
pm8058_pwm_bank_sel(pwm);
rc = pm8058_pwm_write(pwm, 0, 7);
return rc;
}
/* APIs */
/*
* pwm_request - request a PWM device
*/
struct pwm_device *pwm_request(int pwm_id, const char *label)
{
struct pwm_device *pwm;
if (pwm_id >= PM8058_PWM_CHANNELS || pwm_id < 0)
return ERR_PTR(-EINVAL);
if (pwm_chip == NULL)
return ERR_PTR(-ENODEV);
mutex_lock(&pwm_chip->pwm_mutex);
pwm = &pwm_chip->pwm_dev[pwm_id];
if (!pwm->in_use) {
pwm->in_use = 1;
pwm->label = label;
/* Do not enable the LED when the PWM is requested
pwm->use_lut = 0;
if (pwm_chip->pdata && pwm_chip->pdata->config)
pwm_chip->pdata->config(pwm, pwm_id, 1);
*/
} else
pwm = ERR_PTR(-EBUSY);
mutex_unlock(&pwm_chip->pwm_mutex);
return pwm;
}
EXPORT_SYMBOL(pwm_request);
/*
* pwm_free - free a PWM device
*/
void pwm_free(struct pwm_device *pwm)
{
if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
return;
mutex_lock(&pwm->chip->pwm_mutex);
if (pwm->in_use) {
pm8058_pwm_bank_sel(pwm);
pm8058_pwm_start(pwm, 0, 0);
if (pwm->chip->pdata && pwm->chip->pdata->config)
pwm->chip->pdata->config(pwm, pwm->pwm_id, 0);
pwm->in_use = 0;
pwm->label = NULL;
}
pm8058_pwm_bank_enable(pwm, 0);
mutex_unlock(&pwm->chip->pwm_mutex);
}
EXPORT_SYMBOL(pwm_free);
/*
* pwm_config - change a PWM device configuration
*
* @pwm: the PWM device
* @period_us: period in micro second
* @duty_us: duty cycle in micro second
*/
int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
{
int rc;
if (pwm == NULL || IS_ERR(pwm) ||
duty_us > period_us ||
(unsigned)period_us > PM_PWM_PERIOD_MAX ||
(unsigned)period_us < PM_PWM_PERIOD_MIN)
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
mutex_lock(&pwm->chip->pwm_mutex);
if (!pwm->in_use) {
rc = -EINVAL;
goto out_unlock;
}
if (pwm->pwm_period != period_us) {
pm8058_pwm_calc_period(period_us, &pwm->period);
pm8058_pwm_save_period(pwm);
pwm->pwm_period = period_us;
}
pm8058_pwm_calc_pwm_value(pwm, period_us, duty_us);
pm8058_pwm_save_pwm_value(pwm);
pm8058_pwm_save(&pwm->pwm_ctl[1],
PM8058_PWM_BYPASS_LUT, PM8058_PWM_BYPASS_LUT);
pm8058_pwm_bank_sel(pwm);
rc = pm8058_pwm_write(pwm, 1, 6);
pr_debug("duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
(unsigned)duty_us, (unsigned)period_us,
pwm->pwm_value, 1 << pwm->period.pwm_size);
out_unlock:
mutex_unlock(&pwm->chip->pwm_mutex);
return rc;
}
EXPORT_SYMBOL(pwm_config);
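/*
* Illustrative usage sketch (channel id, label and timings are hypothetical,
* not taken from any board file): request LPG channel 0, program a 1000 us
* period with a 500 us high time (50% duty), then start the output.
*
*	struct pwm_device *pwm = pwm_request(0, "example-backlight");
*
*	if (!IS_ERR(pwm)) {
*		pwm_config(pwm, 500, 1000);
*		pwm_enable(pwm);
*	}
*/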
/*
* pwm_enable - start a PWM output toggling
*/
int pwm_enable(struct pwm_device *pwm)
{
int rc;
if (pwm == NULL || IS_ERR(pwm))
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
mutex_lock(&pwm->chip->pwm_mutex);
if (!pwm->in_use)
rc = -EINVAL;
else {
if (pwm->chip->pdata && pwm->chip->pdata->enable)
pwm->chip->pdata->enable(pwm, pwm->pwm_id, 1);
rc = pm8058_pwm_bank_enable(pwm, 1);
pm8058_pwm_bank_sel(pwm);
pm8058_pwm_start(pwm, 1, 0);
}
mutex_unlock(&pwm->chip->pwm_mutex);
return rc;
}
EXPORT_SYMBOL(pwm_enable);
/*
* pwm_disable - stop a PWM output toggling
*/
void pwm_disable(struct pwm_device *pwm)
{
if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
return;
mutex_lock(&pwm->chip->pwm_mutex);
if (pwm->in_use) {
pm8058_pwm_bank_sel(pwm);
pm8058_pwm_start(pwm, 0, 0);
pm8058_pwm_bank_enable(pwm, 0);
if (pwm->chip->pdata && pwm->chip->pdata->enable)
pwm->chip->pdata->enable(pwm, pwm->pwm_id, 0);
}
mutex_unlock(&pwm->chip->pwm_mutex);
}
EXPORT_SYMBOL(pwm_disable);
/**
* pm8058_pwm_config_period - change PWM period
*
* @pwm: the PWM device
* @period: period in struct pm8058_pwm_period
*/
int pm8058_pwm_config_period(struct pwm_device *pwm,
struct pm8058_pwm_period *period)
{
int rc;
if (pwm == NULL || IS_ERR(pwm) || period == NULL)
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
mutex_lock(&pwm->chip->pwm_mutex);
if (!pwm->in_use) {
rc = -EINVAL;
goto out_unlock;
}
pwm->period.pwm_size = period->pwm_size;
pwm->period.clk = period->clk;
pwm->period.pre_div = period->pre_div;
pwm->period.pre_div_exp = period->pre_div_exp;
pm8058_pwm_save_period(pwm);
pm8058_pwm_bank_sel(pwm);
rc = pm8058_pwm_write(pwm, 4, 6);
out_unlock:
mutex_unlock(&pwm->chip->pwm_mutex);
return rc;
}
EXPORT_SYMBOL(pm8058_pwm_config_period);
/**
* pm8058_pwm_config_duty_cycle - change PWM duty cycle
*
* @pwm: the PWM device
* @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size)
*/
int pm8058_pwm_config_duty_cycle(struct pwm_device *pwm, int pwm_value)
{
struct pm8058_pwm_lut lut;
int flags, start_idx;
int rc = 0;
if (pwm == NULL || IS_ERR(pwm))
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
mutex_lock(&pwm->chip->pwm_mutex);
if (!pwm->in_use || !pwm->pwm_period) {
rc = -EINVAL;
goto out_unlock;
}
if (pwm->pwm_value == pwm_value)
goto out_unlock;
pwm->pwm_value = pwm_value;
flags = PM_PWM_LUT_RAMP_UP;
start_idx = pwm->pwm_id * CHAN_LUT_SIZE;
pm8058_pwm_change_table(pwm, &pwm_value, start_idx, 1, 1);
if (!pwm->use_lut) {
pwm->use_lut = 1;
lut.lut_duty_ms = 1;
lut.lut_lo_index = start_idx;
lut.lut_hi_index = start_idx;
lut.lut_pause_lo = 0;
lut.lut_pause_hi = 0;
lut.flags = flags;
rc = pm8058_pwm_change_lut(pwm, &lut);
} else {
pm8058_pwm_save_index(pwm, start_idx, start_idx, flags);
pm8058_pwm_save(&pwm->pwm_ctl[1], PM8058_PWM_BYPASS_LUT, 0);
pm8058_pwm_bank_sel(pwm);
rc = pm8058_pwm_write(pwm, 0, 3);
}
if (rc)
pr_err("[%d]: pm8058_pwm_write: rc=%d\n", pwm->pwm_id, rc);
out_unlock:
mutex_unlock(&pwm->chip->pwm_mutex);
return rc;
}
EXPORT_SYMBOL(pm8058_pwm_config_duty_cycle);
/*
* pwm_configure - change a PWM device configuration
*/
int pwm_configure(struct pwm_device *pwm, struct pm8058_pwm_period *pwm_conf,
		  int bypass_lut, int pwm_value)
{
int rc;
if (pwm == NULL || IS_ERR(pwm) || pwm_conf == NULL)
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
if (!pwm->in_use)
rc = -EINVAL;
else {
pm8058_pwm_config_period(pwm, pwm_conf);
{
mutex_lock(&pwm->chip->pwm_mutex);
pwm->pwm_ctl[0] = 0;
pwm->pwm_ctl[1] = PM8058_PWM_BYPASS_LUT;
pwm->pwm_ctl[2] = 0;
if (pwm_conf->pwm_size > 6) {
pwm->pwm_ctl[3] = pwm_value
& PM8058_PWM_VALUE_BIT7_0;
pm8058_pwm_save(&pwm->pwm_ctl[4],
		PM8058_PWM_VALUE_BIT8, pwm_value >> 1);
} else {
pwm->pwm_ctl[3] = pwm_value
& PM8058_PWM_VALUE_BIT5_0;
}
pm8058_pwm_bank_sel(pwm);
rc = pm8058_pwm_write(pwm, 0, (pwm_conf->pwm_size > 6) ? 4 : 3);
mutex_unlock(&pwm->chip->pwm_mutex);
}
}
return rc;
}
EXPORT_SYMBOL(pwm_configure);
/**
* pm8058_pwm_lut_config - change a PWM device configuration to use LUT
*
* @pwm: the PWM device
* @period_us: period in micro second
* @duty_pct: array of duty cycles in percent, like 20, 50.
* @duty_time_ms: time for each duty cycle in millisecond
* @start_idx: start index in lookup table from 0 to MAX-1
* @idx_len: number of indices
* @pause_lo: pause time in millisecond at low index
* @pause_hi: pause time in millisecond at high index
* @flags: control flags
*/
int pm8058_pwm_lut_config(struct pwm_device *pwm, int period_us,
int duty_pct[], int duty_time_ms, int start_idx,
int idx_len, int pause_lo, int pause_hi, int flags)
{
struct pm8058_pwm_lut lut;
int len;
int rc;
if (pwm == NULL || IS_ERR(pwm) || !idx_len)
return -EINVAL;
if (duty_pct == NULL && !(flags & PM_PWM_LUT_NO_TABLE))
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
if (idx_len >= PM_PWM_LUT_SIZE && start_idx)
return -EINVAL;
if ((start_idx + idx_len) > PM_PWM_LUT_SIZE)
return -EINVAL;
if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
(unsigned)period_us < PM_PWM_PERIOD_MIN)
return -EINVAL;
mutex_lock(&pwm->chip->pwm_mutex);
if (!pwm->in_use) {
rc = -EINVAL;
goto out_unlock;
}
if (pwm->pwm_period != period_us) {
pm8058_pwm_calc_period(period_us, &pwm->period);
pm8058_pwm_save_period(pwm);
pwm->pwm_period = period_us;
}
len = (idx_len > PM_PWM_LUT_SIZE) ? PM_PWM_LUT_SIZE : idx_len;
if (flags & PM_PWM_LUT_NO_TABLE)
goto after_table_write;
rc = pm8058_pwm_change_table(pwm, duty_pct, start_idx, len, 0);
if (rc) {
pr_err("pm8058_pwm_change_table: rc=%d\n", rc);
goto out_unlock;
}
after_table_write:
lut.lut_duty_ms = duty_time_ms;
lut.lut_lo_index = start_idx;
lut.lut_hi_index = start_idx + len - 1;
lut.lut_pause_lo = pause_lo;
lut.lut_pause_hi = pause_hi;
lut.flags = flags;
rc = pm8058_pwm_change_lut(pwm, &lut);
out_unlock:
mutex_unlock(&pwm->chip->pwm_mutex);
return rc;
}
EXPORT_SYMBOL(pm8058_pwm_lut_config);
/**
* pm8058_pwm_lut_enable - control a PWM device to start/stop LUT ramp
*
* @pwm: the PWM device
* @start: to start (1), or stop (0)
*/
int pm8058_pwm_lut_enable(struct pwm_device *pwm, int start)
{
if (pwm == NULL || IS_ERR(pwm))
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
mutex_lock(&pwm->chip->pwm_mutex);
if (start) {
pm8058_pwm_bank_enable(pwm, 1);
pm8058_pwm_bank_sel(pwm);
pm8058_pwm_start(pwm, 1, 1);
} else {
pm8058_pwm_bank_sel(pwm);
pm8058_pwm_start(pwm, 0, 0);
pm8058_pwm_bank_enable(pwm, 0);
}
mutex_unlock(&pwm->chip->pwm_mutex);
return 0;
}
EXPORT_SYMBOL(pm8058_pwm_lut_enable);
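/*
* Illustrative LUT usage sketch (all values and the array name are
* hypothetical): ramp through four brightness steps of 50 ms each, reversing
* at the top and looping forever, then start the ramp generator.
*
*	static int example_duty_pct[] = { 10, 30, 60, 100 };
*
*	pm8058_pwm_lut_config(pwm, 1000, example_duty_pct, 50, 0, 4, 0, 0,
*			      PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP |
*			      PM_PWM_LUT_REVERSE);
*	pm8058_pwm_lut_enable(pwm, 1);
*/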
#define SSBI_REG_ADDR_LED_BASE 0x131
#define SSBI_REG_ADDR_LED(n) (SSBI_REG_ADDR_LED_BASE + (n))
#define SSBI_REG_ADDR_FLASH_BASE 0x48
#define SSBI_REG_ADDR_FLASH_DRV_1 0xFB
#define SSBI_REG_ADDR_FLASH(n) (((n) < 2 ? \
SSBI_REG_ADDR_FLASH_BASE + (n) : \
SSBI_REG_ADDR_FLASH_DRV_1))
#define PM8058_LED_CURRENT_SHIFT 3
#define PM8058_LED_MODE_MASK 0x07
#define PM8058_FLASH_CURRENT_SHIFT 4
#define PM8058_FLASH_MODE_MASK 0x03
#define PM8058_FLASH_MODE_NONE 0
#define PM8058_FLASH_MODE_DTEST1 1
#define PM8058_FLASH_MODE_DTEST2 2
#define PM8058_FLASH_MODE_PWM 3
int pm8058_pwm_config_led(struct pwm_device *pwm, int id,
int mode, int max_current)
{
int rc;
u8 conf;
switch (id) {
case PM_PWM_LED_0:
case PM_PWM_LED_1:
case PM_PWM_LED_2:
conf = mode & PM8058_LED_MODE_MASK;
conf |= (max_current / 2) << PM8058_LED_CURRENT_SHIFT;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LED(id), conf);
break;
case PM_PWM_LED_KPD:
case PM_PWM_LED_FLASH:
case PM_PWM_LED_FLASH1:
switch (mode) {
case PM_PWM_CONF_PWM1:
case PM_PWM_CONF_PWM2:
case PM_PWM_CONF_PWM3:
conf = PM8058_FLASH_MODE_PWM;
break;
case PM_PWM_CONF_DTEST1:
conf = PM8058_FLASH_MODE_DTEST1;
break;
case PM_PWM_CONF_DTEST2:
conf = PM8058_FLASH_MODE_DTEST2;
break;
default:
conf = PM8058_FLASH_MODE_NONE;
break;
}
conf |= (max_current / 20) << PM8058_FLASH_CURRENT_SHIFT;
id -= PM_PWM_LED_KPD;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_FLASH(id), conf);
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
EXPORT_SYMBOL(pm8058_pwm_config_led);
int pm8058_pwm_set_dtest(struct pwm_device *pwm, int enable)
{
int rc;
u8 reg;
if (pwm == NULL || IS_ERR(pwm))
return -EINVAL;
if (pwm->chip == NULL)
return -ENODEV;
if (!pwm->in_use)
rc = -EINVAL;
else {
reg = pwm->pwm_id & PM8058_PWM_DTEST_BANK_MASK;
if (enable)
/* Only Test 1 available */
reg |= (1 << PM8058_PWM_DTEST_SHIFT) &
PM8058_PWM_DTEST_MASK;
rc = pm8xxx_writeb(pwm->dev->parent,
SSBI_REG_ADDR_LPG_TEST, reg);
if (rc)
pr_err("pm8xxx_write(DTEST=0x%x): rc=%d\n", reg, rc);
}
return rc;
}
EXPORT_SYMBOL(pm8058_pwm_set_dtest);
static int __devinit pmic8058_pwm_probe(struct platform_device *pdev)
{
struct pm8058_pwm_chip *chip;
int i;
chip = kzalloc(sizeof *chip, GFP_KERNEL);
if (chip == NULL) {
pr_err("kzalloc() failed.\n");
return -ENOMEM;
}
for (i = 0; i < PM8058_PWM_CHANNELS; i++) {
chip->pwm_dev[i].pwm_id = i;
chip->pwm_dev[i].chip = chip;
chip->pwm_dev[i].dev = &pdev->dev;
}
mutex_init(&chip->pwm_mutex);
chip->pdata = pdev->dev.platform_data;
pwm_chip = chip;
platform_set_drvdata(pdev, chip);
pr_notice("OK\n");
return 0;
}
static int __devexit pmic8058_pwm_remove(struct platform_device *pdev)
{
struct pm8058_pwm_chip *chip = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
kfree(chip);
return 0;
}
static struct platform_driver pmic8058_pwm_driver = {
.probe = pmic8058_pwm_probe,
.remove = __devexit_p(pmic8058_pwm_remove),
.driver = {
.name = "pm8058-pwm",
.owner = THIS_MODULE,
},
};
static int __init pm8058_pwm_init(void)
{
return platform_driver_register(&pmic8058_pwm_driver);
}
static void __exit pm8058_pwm_exit(void)
{
platform_driver_unregister(&pmic8058_pwm_driver);
}
subsys_initcall(pm8058_pwm_init);
module_exit(pm8058_pwm_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8058 PWM driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:pmic8058_pwm");
| gpl-2.0 |
full-of-foo/linux | arch/powerpc/sysdev/ppc4xx_pci.c | 508 | 51050 | /*
* PCI / PCI-X / PCI-Express support for 4xx parts
*
* Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*
* Most PCI Express code is coming from Stefan Roese implementation for
* arch/ppc in the Denx tree, slightly reworked by me.
*
* Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
*
* Some of that comes itself from a previous implementation for 440SPE only
* by Roland Dreier:
*
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Roland Dreier <rolandd@cisco.com>
*
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <mm/mmu_decl.h>
#include "ppc4xx_pci.h"
static int dma_offset_set;
#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
#define RES_TO_U32_LOW(val) \
((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val) \
((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
static inline int ppc440spe_revA(void)
{
/* Catch both 440SPe variants, with and without RAID6 support */
if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
return 1;
else
return 0;
}
static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
{
struct pci_controller *hose;
int i;
if (dev->devfn != 0 || dev->bus->self != NULL)
return;
hose = pci_bus_to_host(dev->bus);
if (hose == NULL)
return;
if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
!of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
!of_device_is_compatible(hose->dn, "ibm,plb-pci"))
return;
if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
}
/* Hide the PCI host BARs from the kernel as their content doesn't
* fit well in the resource management
*/
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
}
printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
void __iomem *reg,
struct resource *res)
{
u64 size;
const u32 *ranges;
int rlen;
int pna = of_n_addr_cells(hose->dn);
int np = pna + 5;
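/*
* Layout of each dma-ranges entry, np = pna + 5 cells, as decoded in the
* loop below:
*   cell 0           PCI space code (only 32-bit memory space is accepted;
*                    bit 30 marks the window prefetchable)
*   cells 1..2       64-bit PCI address
*   cells 3..2+pna   parent (PLB) address, translated with
*                    of_translate_dma_address()
*   last two cells   64-bit size
* Illustrative entry for a 2 GB window at PCI and CPU address 0, assuming
* pna = 1: <0x02000000 0 0 0 0 0x80000000>.
*/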
/* Default */
res->start = 0;
size = 0x80000000;
res->end = size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
/* Get dma-ranges property */
ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
if (ranges == NULL)
goto out;
/* Walk it */
while ((rlen -= np * 4) >= 0) {
u32 pci_space = ranges[0];
u64 pci_addr = of_read_number(ranges + 1, 2);
u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
size = of_read_number(ranges + pna + 3, 2);
ranges += np;
if (cpu_addr == OF_BAD_ADDR || size == 0)
continue;
/* We only care about memory */
if ((pci_space & 0x03000000) != 0x02000000)
continue;
/* We currently only support memory at 0, and pci_addr
* within 32 bits space
*/
if (cpu_addr != 0 || pci_addr > 0xffffffff) {
printk(KERN_WARNING "%s: Ignored unsupported dma range"
" 0x%016llx...0x%016llx -> 0x%016llx\n",
hose->dn->full_name,
pci_addr, pci_addr + size - 1, cpu_addr);
continue;
}
/* Check if not prefetchable */
if (!(pci_space & 0x40000000))
res->flags &= ~IORESOURCE_PREFETCH;
/* Use that */
res->start = pci_addr;
/* Beware of 32 bits resources */
if (sizeof(resource_size_t) == sizeof(u32) &&
(pci_addr + size) > 0x100000000ull)
res->end = 0xffffffff;
else
res->end = res->start + size - 1;
break;
}
/* We only support one global DMA offset */
if (dma_offset_set && pci_dram_offset != res->start) {
printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
hose->dn->full_name);
return -ENXIO;
}
/* Check that we can fit all of memory as we don't support
* DMA bounce buffers
*/
if (size < total_memory) {
printk(KERN_ERR "%s: dma-ranges too small "
"(size=%llx total_memory=%llx)\n",
hose->dn->full_name, size, (u64)total_memory);
return -ENXIO;
}
/* Check we are a power of 2 size and that base is a multiple of size */
if ((size & (size - 1)) != 0 ||
(res->start & (size - 1)) != 0) {
printk(KERN_ERR "%s: dma-ranges unaligned\n",
hose->dn->full_name);
return -ENXIO;
}
/* Check that we are fully contained within 32 bits space */
if (res->end > 0xffffffff) {
printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
hose->dn->full_name);
return -ENXIO;
}
out:
dma_offset_set = 1;
pci_dram_offset = res->start;
printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
pci_dram_offset);
return 0;
}
/*
* 4xx PCI 2.x part
*/
static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
void __iomem *reg,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 ma, pcila, pciha;
/* Hack warning! The "old" PCI 2.x cell only lets us configure the low
* 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
* address are actually hard wired to a value that appears to depend
* on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
*
* The trick here is we just crop those top bits and ignore them when
* programming the chip. That means the device-tree has to be right
* for the specific part used (we don't print a warning if it's wrong
* but on the other hand, you'll crash quickly enough), but at least
* this code should work whatever the hard coded value is
*/
plb_addr &= 0xffffffffull;
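/*
* Illustrative example (440EPx-style numbering assumed): a device-tree PLB
* address of 0x1_8000_0000 is cropped to 0x8000_0000 here, the hard-wired
* top bit supplying the missing 0x1 when the window is decoded in hardware.
*/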
/* Note: Due to the above hack, the test below doesn't actually test
* whether your address is above 4G, but that the address and
* (address + size) are both contained in the same 4G
*/
if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
size < 0x1000 || (plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%s: Resource out of range\n",
hose->dn->full_name);
return -1;
}
ma = (0xffffffffu << ilog2(size)) | 1;
if (flags & IORESOURCE_PREFETCH)
ma |= 2;
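/*
* Illustrative mask value (size assumed): a 256 MB window has
* ilog2(size) = 28, so ma = (0xffffffff << 28) | 1 = 0xf0000001, with bit 1
* also set when the window is prefetchable.
*/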
pciha = RES_TO_U32_HIGH(pci_addr);
pcila = RES_TO_U32_LOW(pci_addr);
writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
return 0;
}
static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
void __iomem *reg)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 2) {
printk(KERN_WARNING "%s: Too many ranges\n",
hose->dn->full_name);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pci_PMM(hose, reg,
res->start,
res->start - hose->pci_mem_offset,
res->end + 1 - res->start,
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == hose->pci_mem_offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
hose->dn->full_name);
}
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
void __iomem *reg,
const struct resource *res)
{
resource_size_t size = res->end - res->start + 1;
u32 sa;
/* Calculate window size */
sa = (0xffffffffu << ilog2(size)) | 1;
sa |= 0x1;
/* RAM is always at 0 local for now */
writel(0, reg + PCIL0_PTM1LA);
writel(sa, reg + PCIL0_PTM1MS);
/* Map on PCI side */
early_write_config_dword(hose, hose->first_busno, 0,
PCI_BASE_ADDRESS_1, res->start);
early_write_config_dword(hose, hose->first_busno, 0,
PCI_BASE_ADDRESS_2, 0x00000000);
early_write_config_word(hose, hose->first_busno, 0,
PCI_COMMAND, 0x0006);
}
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
/* NYI */
struct resource rsrc_cfg;
struct resource rsrc_reg;
struct resource dma_window;
struct pci_controller *hose = NULL;
void __iomem *reg = NULL;
const int *bus_range;
int primary = 0;
/* Check if device is enabled */
if (!of_device_is_available(np)) {
printk(KERN_INFO "%s: Port disabled via device-tree\n",
np->full_name);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
printk(KERN_ERR "%s: Can't get PCI config register base !",
np->full_name);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
printk(KERN_ERR "%s: Can't get PCI internal register base !",
np->full_name);
return;
}
/* Check if primary bridge */
if (of_get_property(np, "primary", NULL))
primary = 1;
/* Get bus range if any */
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
if (reg == NULL) {
printk(KERN_ERR "%s: Can't map registers !", np->full_name);
goto fail;
}
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(np);
if (!hose)
goto fail;
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Setup config space */
setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
/* Disable all windows */
writel(0, reg + PCIL0_PMM0MA);
writel(0, reg + PCIL0_PMM1MA);
writel(0, reg + PCIL0_PMM2MA);
writel(0, reg + PCIL0_PTM1MS);
writel(0, reg + PCIL0_PTM2MS);
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, np, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pci_PMMs(hose, reg);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
/* We don't need the registers anymore */
iounmap(reg);
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (reg)
iounmap(reg);
}
/*
* 4xx PCI-X part
*/
static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
void __iomem *reg,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 lah, lal, pciah, pcial, sa;
if (!is_power_of_2(size) || size < 0x1000 ||
(plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%s: Resource out of range\n",
hose->dn->full_name);
return -1;
}
/* Calculate register values */
lah = RES_TO_U32_HIGH(plb_addr);
lal = RES_TO_U32_LOW(plb_addr);
pciah = RES_TO_U32_HIGH(pci_addr);
pcial = RES_TO_U32_LOW(pci_addr);
sa = (0xffffffffu << ilog2(size)) | 0x1;
/* Program register values */
if (index == 0) {
writel(lah, reg + PCIX0_POM0LAH);
writel(lal, reg + PCIX0_POM0LAL);
writel(pciah, reg + PCIX0_POM0PCIAH);
writel(pcial, reg + PCIX0_POM0PCIAL);
writel(sa, reg + PCIX0_POM0SA);
} else {
writel(lah, reg + PCIX0_POM1LAH);
writel(lal, reg + PCIX0_POM1LAL);
writel(pciah, reg + PCIX0_POM1PCIAH);
writel(pcial, reg + PCIX0_POM1PCIAL);
writel(sa, reg + PCIX0_POM1SA);
}
return 0;
}
static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
void __iomem *reg)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
printk(KERN_WARNING "%s: Too many ranges\n",
hose->dn->full_name);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pcix_POM(hose, reg,
res->start,
res->start - hose->pci_mem_offset,
res->end + 1 - res->start,
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == hose->pci_mem_offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
hose->dn->full_name);
}
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
void __iomem *reg,
const struct resource *res,
int big_pim,
int enable_msi_hole)
{
resource_size_t size = res->end - res->start + 1;
u32 sa;
/* RAM is always at 0 */
writel(0x00000000, reg + PCIX0_PIM0LAH);
writel(0x00000000, reg + PCIX0_PIM0LAL);
/* Calculate window size */
sa = (0xffffffffu << ilog2(size)) | 1;
sa |= 0x1;
if (res->flags & IORESOURCE_PREFETCH)
sa |= 0x2;
if (enable_msi_hole)
sa |= 0x4;
writel(sa, reg + PCIX0_PIM0SA);
if (big_pim)
writel(0xffffffff, reg + PCIX0_PIM0SAH);
/* Map on PCI side */
writel(0x00000000, reg + PCIX0_BAR0H);
writel(res->start, reg + PCIX0_BAR0L);
writew(0x0006, reg + PCIX0_COMMAND);
}
static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
struct resource rsrc_cfg;
struct resource rsrc_reg;
struct resource dma_window;
struct pci_controller *hose = NULL;
void __iomem *reg = NULL;
const int *bus_range;
int big_pim = 0, msi = 0, primary = 0;
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
printk(KERN_ERR "%s:Can't get PCI-X config register base !",
np->full_name);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
np->full_name);
return;
}
/* Check if it supports large PIMs (440GX) */
if (of_get_property(np, "large-inbound-windows", NULL))
big_pim = 1;
/* Check if we should enable MSIs inbound hole */
if (of_get_property(np, "enable-msi-hole", NULL))
msi = 1;
/* Check if primary bridge */
if (of_get_property(np, "primary", NULL))
primary = 1;
/* Get bus range if any */
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
if (reg == NULL) {
printk(KERN_ERR "%s: Can't map registers !", np->full_name);
goto fail;
}
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(np);
if (!hose)
goto fail;
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Setup config space */
setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
/* Disable all windows */
writel(0, reg + PCIX0_POM0SA);
writel(0, reg + PCIX0_POM1SA);
writel(0, reg + PCIX0_POM2SA);
writel(0, reg + PCIX0_PIM0SA);
writel(0, reg + PCIX0_PIM1SA);
writel(0, reg + PCIX0_PIM2SA);
if (big_pim) {
writel(0, reg + PCIX0_PIM0SAH);
writel(0, reg + PCIX0_PIM2SAH);
}
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, np, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pcix_POMs(hose, reg);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
/* We don't need the registers anymore */
iounmap(reg);
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (reg)
iounmap(reg);
}
#ifdef CONFIG_PPC4xx_PCI_EXPRESS
/*
* 4xx PCI-Express part
*
* We support 3 parts currently based on the compatible property:
*
* ibm,plb-pciex-440spe
* ibm,plb-pciex-405ex
* ibm,plb-pciex-460ex
*
* Anything else will be rejected for now, as the other variants are all
* subtly different, unfortunately.
*
*/
#define MAX_PCIE_BUS_MAPPED 0x40
struct ppc4xx_pciex_port
{
struct pci_controller *hose;
struct device_node *node;
unsigned int index;
int endpoint;
int link;
int has_ibpre;
unsigned int sdr_base;
dcr_host_t dcrs;
struct resource cfg_space;
struct resource utl_regs;
void __iomem *utl_base;
};
static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;
struct ppc4xx_pciex_hwops
{
int (*core_init)(struct device_node *np);
int (*port_init_hw)(struct ppc4xx_pciex_port *port);
int (*setup_utl)(struct ppc4xx_pciex_port *port);
};
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
#ifdef CONFIG_44x
/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
u32 valPE0, valPE1, valPE2;
int err = 0;
/* SDR0_PEGPLLLCT1 reset */
if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
/*
* the PCIe core was probably already initialised
* by firmware - let's re-reset RCSSET regs
*
* -- Shouldn't we also re-reset the whole thing ? -- BenH
*/
pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
}
valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
/* SDR0_PExRCSSET rstgu */
if (!(valPE0 & 0x01000000) ||
!(valPE1 & 0x01000000) ||
!(valPE2 & 0x01000000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
err = -1;
}
/* SDR0_PExRCSSET rstdl */
if (!(valPE0 & 0x00010000) ||
!(valPE1 & 0x00010000) ||
!(valPE2 & 0x00010000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
err = -1;
}
/* SDR0_PExRCSSET rstpyn */
if ((valPE0 & 0x00001000) ||
(valPE1 & 0x00001000) ||
(valPE2 & 0x00001000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
err = -1;
}
/* SDR0_PExRCSSET hldplb */
if ((valPE0 & 0x10000000) ||
(valPE1 & 0x10000000) ||
(valPE2 & 0x10000000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
err = -1;
}
/* SDR0_PExRCSSET rdy */
if ((valPE0 & 0x00100000) ||
(valPE1 & 0x00100000) ||
(valPE2 & 0x00100000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
err = -1;
}
/* SDR0_PExRCSSET shutdown */
if ((valPE0 & 0x00000100) ||
(valPE1 & 0x00000100) ||
(valPE2 & 0x00000100)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
err = -1;
}
return err;
}
/* Global PCIe core initializations for 440SPe core */
static int __init ppc440spe_pciex_core_init(struct device_node *np)
{
int time_out = 20;
/* Set PLL clock receiver to LVPECL */
dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
/* Shouldn't we do all the calibration stuff etc... here ? */
if (ppc440spe_pciex_check_reset(np))
return -ENXIO;
if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
"failed (0x%08x)\n",
mfdcri(SDR0, PESDR0_PLLLCT2));
return -1;
}
/* De-assert reset of PCIe PLL, wait for lock */
dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
udelay(3);
while (time_out) {
if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
time_out--;
udelay(1);
} else
break;
}
if (!time_out) {
printk(KERN_INFO "PCIE: VCO output not locked\n");
return -1;
}
pr_debug("PCIE initialization OK\n");
return 3;
}
static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val = 1 << 24;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
else
val = PTYPE_ROOT_PORT << 20;
if (port->index == 0)
val |= LNKW_X8 << 12;
else
val |= LNKW_X4 << 12;
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
if (ppc440spe_revA())
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
if (port->index == 0) {
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
0x35000000);
}
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
(1 << 24) | (1 << 16), 1 << 12);
return 0;
}
static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
return ppc440spe_pciex_init_port_hw(port);
}
static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
int rc = ppc440spe_pciex_init_port_hw(port);
port->has_ibpre = 1;
return rc;
}
static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
/* XXX Check what that value means... I hate magic */
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
return 0;
}
static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
/* Report CRS to the operating system */
out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
return 0;
}
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speA_pciex_init_port_hw,
.setup_utl = ppc440speA_pciex_init_utl,
};
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speB_pciex_init_port_hw,
.setup_utl = ppc440speB_pciex_init_utl,
};
static int __init ppc460ex_pciex_core_init(struct device_node *np)
{
/* Nothing to do, return 2 ports */
return 2;
}
static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
u32 utlset1;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
else
val = PTYPE_ROOT_PORT << 20;
if (port->index == 0) {
val |= LNKW_X1 << 12;
utlset1 = 0x20000000;
} else {
val |= LNKW_X4 << 12;
utlset1 = 0x20101101;
}
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
switch (port->index) {
case 0:
mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
break;
case 1:
mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
break;
}
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
/* Poll for PHY reset */
/* XXX FIXME add timeout */
switch (port->index) {
case 0:
while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
udelay(10);
break;
case 1:
while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
udelay(10);
break;
}
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
(mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
PESDRx_RCSSET_RSTPYN);
port->has_ibpre = 1;
return 0;
}
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
return 0;
}
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
.core_init = ppc460ex_pciex_core_init,
.port_init_hw = ppc460ex_pciex_init_port_hw,
.setup_utl = ppc460ex_pciex_init_utl,
};
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
static int __init ppc405ex_pciex_core_init(struct device_node *np)
{
/* Nothing to do, return 2 ports */
return 2;
}
static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
{
/* Assert the PE0_PHY reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
msleep(1);
/* deassert the PE0_hotreset */
if (port->endpoint)
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
else
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
/* poll for phy !reset */
/* XXX FIXME add timeout */
while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
;
/* deassert the PE0_gpl_utl_reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
}
static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT;
else
val = PTYPE_ROOT_PORT;
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1 << 24 | val << 20 | LNKW_X1 << 12);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
/*
* Only reset the PHY when no link is currently established.
* This is for the Atheros PCIe board which has problems to establish
* the link (again) after this PHY reset. All other currently tested
* PCIe boards don't show this problem.
* This has to be re-tested and fixed in a later release!
*/
val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
if (!(val & 0x00001000))
ppc405ex_pcie_phy_reset(port);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
port->has_ibpre = 1;
return 0;
}
static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
return 0;
}
static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
.core_init = ppc405ex_pciex_core_init,
.port_init_hw = ppc405ex_pciex_init_port_hw,
.setup_utl = ppc405ex_pciex_init_utl,
};
#endif /* CONFIG_40x */
/* Check that the core has been initialized and if not, do it */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
static int core_init;
int count = -ENODEV;
if (core_init++)
return 0;
#ifdef CONFIG_44x
if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
if (ppc440spe_revA())
ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
else
ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
}
if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
#endif
if (ppc4xx_pciex_hwops == NULL) {
printk(KERN_WARNING "PCIE: unknown host type %s\n",
np->full_name);
return -ENODEV;
}
count = ppc4xx_pciex_hwops->core_init(np);
if (count > 0) {
ppc4xx_pciex_ports =
kzalloc(count * sizeof(struct ppc4xx_pciex_port),
GFP_KERNEL);
if (ppc4xx_pciex_ports) {
ppc4xx_pciex_port_count = count;
return 0;
}
printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
return -ENOMEM;
}
return -ENODEV;
}
static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
/* We map PCI Express configuration based on the reg property */
dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
RES_TO_U32_HIGH(port->cfg_space.start));
dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
RES_TO_U32_LOW(port->cfg_space.start));
/* XXX FIXME: Use size from reg property. For now, map 512M */
dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
/* We map UTL registers based on the reg property */
dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
RES_TO_U32_HIGH(port->utl_regs.start));
dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
RES_TO_U32_LOW(port->utl_regs.start));
/* XXX FIXME: Use size from reg property */
dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
/* Disable all other outbound windows */
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
unsigned int sdr_offset,
unsigned int mask,
unsigned int value,
int timeout_ms)
{
u32 val;
while(timeout_ms--) {
val = mfdcri(SDR0, port->sdr_base + sdr_offset);
if ((val & mask) == value) {
pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
port->index, sdr_offset, timeout_ms, val);
return 0;
}
msleep(1);
}
return -1;
}
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
int rc = 0;
/* Init HW */
if (ppc4xx_pciex_hwops->port_init_hw)
rc = ppc4xx_pciex_hwops->port_init_hw(port);
if (rc != 0)
return rc;
printk(KERN_INFO "PCIE%d: Checking link...\n",
port->index);
/* Wait for reset to complete */
if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
printk(KERN_WARNING "PCIE%d: PGRST failed\n",
port->index);
return -1;
}
/* Check for card presence detect if supported, if not, just wait for
* link unconditionally.
*
* note that we don't fail if there is no link, we just filter out
* config space accesses. That way, it will be easier to implement
* hotplug later on.
*/
if (!port->has_ibpre ||
!ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
1 << 28, 1 << 28, 100)) {
printk(KERN_INFO
"PCIE%d: Device detected, waiting for link...\n",
port->index);
if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
0x1000, 0x1000, 2000))
printk(KERN_WARNING
"PCIE%d: Link up failed\n", port->index);
else {
printk(KERN_INFO
"PCIE%d: link is up !\n", port->index);
port->link = 1;
}
} else
printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
/*
* Initialize mapping: disable all regions and configure
* CFG and REG regions based on resources in the device tree
*/
ppc4xx_pciex_port_init_mapping(port);
/*
* Map UTL
*/
port->utl_base = ioremap(port->utl_regs.start, 0x100);
BUG_ON(port->utl_base == NULL);
/*
* Setup UTL registers --BenH.
*/
if (ppc4xx_pciex_hwops->setup_utl)
ppc4xx_pciex_hwops->setup_utl(port);
/*
* Check for VC0 active and assert RDY.
*/
if (port->link &&
ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1 << 16, 1 << 16, 5000)) {
printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
port->link = 0;
}
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
msleep(100);
return 0;
}
static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
struct pci_bus *bus,
unsigned int devfn)
{
static int message;
/* An endpoint cannot generate upstream (remote) config cycles */
if (port->endpoint && bus->number != port->hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
/* Check we are within the mapped range */
if (bus->number > port->hose->last_busno) {
if (!message) {
printk(KERN_WARNING "Warning! Probing bus %u"
" out of range !\n", bus->number);
message++;
}
return PCIBIOS_DEVICE_NOT_FOUND;
}
/* The root complex has only one device / function */
if (bus->number == port->hose->first_busno && devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
/* The other side of the RC has only one device as well */
if (bus->number == (port->hose->first_busno + 1) &&
PCI_SLOT(devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
/* Check if we have a link */
if ((bus->number != port->hose->first_busno) && !port->link)
return PCIBIOS_DEVICE_NOT_FOUND;
return 0;
}
static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
struct pci_bus *bus,
unsigned int devfn)
{
int relbus;
/* Remove the casts when we finally remove the stupid volatile
* in struct pci_controller
*/
if (bus->number == port->hose->first_busno)
return (void __iomem *)port->hose->cfg_addr;
relbus = bus->number - (port->hose->first_busno + 1);
return (void __iomem *)port->hose->cfg_data +
((relbus << 20) | (devfn << 12));
}
static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct ppc4xx_pciex_port *port =
&ppc4xx_pciex_ports[hose->indirect_type];
void __iomem *addr;
u32 gpl_cfg;
BUG_ON(hose != port->hose);
if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
/*
 * Reading from the configuration space of a non-existent device can
 * generate transaction errors. For the duration of the read we
 * suppress assertion of machine check exceptions to avoid them.
 */
gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
/* Make sure no CRS is recorded */
out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
switch (len) {
case 1:
*val = in_8((u8 *)(addr + offset));
break;
case 2:
*val = in_le16((u16 *)(addr + offset));
break;
default:
*val = in_le32((u32 *)(addr + offset));
break;
}
pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
" offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
bus->number, hose->first_busno, hose->last_busno,
devfn, offset, len, addr + offset, *val);
/* Check for CRS (440SPe rev B does that for us but heh ..) */
if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
pr_debug("Got CRS !\n");
if (len != 4 || offset != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
*val = 0xffff0001;
}
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
return PCIBIOS_SUCCESSFUL;
}
static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct ppc4xx_pciex_port *port =
&ppc4xx_pciex_ports[hose->indirect_type];
void __iomem *addr;
u32 gpl_cfg;
if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
/*
 * Writing to the configuration space of a non-existent device can
 * generate transaction errors. For the duration of the write we
 * suppress assertion of machine check exceptions to avoid them.
 */
gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
" offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
bus->number, hose->first_busno, hose->last_busno,
devfn, offset, len, addr + offset, val);
switch (len) {
case 1:
out_8((u8 *)(addr + offset), val);
break;
case 2:
out_le16((u16 *)(addr + offset), val);
break;
default:
out_le32((u32 *)(addr + offset), val);
break;
}
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops ppc4xx_pciex_pci_ops =
{
.read = ppc4xx_pciex_read_config,
.write = ppc4xx_pciex_write_config,
};
static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 lah, lal, pciah, pcial, sa;
if (!is_power_of_2(size) ||
(index < 2 && size < 0x100000) ||
(index == 2 && size < 0x100) ||
(plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%s: Resource out of range\n",
hose->dn->full_name);
return -1;
}
/* Calculate register values */
lah = RES_TO_U32_HIGH(plb_addr);
lal = RES_TO_U32_LOW(plb_addr);
pciah = RES_TO_U32_HIGH(pci_addr);
pcial = RES_TO_U32_LOW(pci_addr);
sa = (0xffffffffu << ilog2(size)) | 0x1;
/* Program register values */
switch (index) {
case 0:
out_le32(mbase + PECFG_POM0LAH, pciah);
out_le32(mbase + PECFG_POM0LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
/* Note that 3 here means enabled | single region */
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
break;
case 1:
out_le32(mbase + PECFG_POM1LAH, pciah);
out_le32(mbase + PECFG_POM1LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
/* Note that 3 here means enabled | single region */
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
break;
case 2:
out_le32(mbase + PECFG_POM2LAH, pciah);
out_le32(mbase + PECFG_POM2LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
/* Note that 3 here means enabled | IO space !!! */
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3);
break;
}
return 0;
}
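/*
 * Illustrative sketch, not part of the original driver: since the size
 * is checked to be a power of two, the "sa" value computed above is
 * simply ~(size - 1) with bit 0 set.  For a hypothetical 256MB
 * outbound window:
 *
 *	u64 size = 0x10000000;				(256MB)
 *	u32 sa   = (0xffffffffu << ilog2(size)) | 0x1;	(== 0xf0000001)
 *
 * The extra bit in "sa | 3" written to OMRxMSKL is the "single region"
 * flag mentioned in the comments above.
 */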
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
printk(KERN_WARNING "%s: Too many ranges\n",
port->node->full_name);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
res->start,
res->start - hose->pci_mem_offset,
res->end + 1 - res->start,
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == hose->pci_mem_offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
hose->dn->full_name);
/* Configure IO, always 64K starting at 0. We hard-wire it to 64K!
 * Note also that it -has- to be region index 2 on this HW.
 */
if (hose->io_resource.flags & IORESOURCE_IO)
ppc4xx_setup_one_pciex_POM(port, hose, mbase,
hose->io_base_phys, 0,
0x10000, IORESOURCE_IO, 2);
}
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase,
struct resource *res)
{
resource_size_t size = res->end - res->start + 1;
u64 sa;
if (port->endpoint) {
resource_size_t ep_addr = 0;
resource_size_t ep_size = 32 << 20;
/* Currently we map a fixed 32MByte window (ep_size) to PLB address
 * 0 (SDRAM). This should probably be configurable via a dts
 * property.
 */
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(ep_size));
/* Setup BAR0 */
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
PCI_BASE_ADDRESS_MEM_TYPE_64);
/* Disable BAR1 & BAR2 */
out_le32(mbase + PECFG_BAR1MPA, 0);
out_le32(mbase + PECFG_BAR2HMPA, 0);
out_le32(mbase + PECFG_BAR2LMPA, 0);
out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
} else {
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(size));
if (res->flags & IORESOURCE_PREFETCH)
sa |= 0x8;
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
/* The setup of the split looks weird to me ... let's see
* if it works
*/
out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
}
/* Enable inbound mapping */
out_le32(mbase + PECFG_PIMEN, 0x1);
/* Enable I/O, Mem, and Busmaster cycles */
out_le16(mbase + PCI_COMMAND,
in_le16(mbase + PCI_COMMAND) |
PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
struct resource dma_window;
struct pci_controller *hose = NULL;
const int *bus_range;
int primary = 0, busses;
void __iomem *mbase = NULL, *cfg_data = NULL;
const u32 *pval;
u32 val;
/* Check if primary bridge */
if (of_get_property(port->node, "primary", NULL))
primary = 1;
/* Get bus range if any */
bus_range = of_get_property(port->node, "bus-range", NULL);
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(port->node);
if (!hose)
goto fail;
/* We stick the port number in "indirect_type" so the config space
* ops can retrieve the port data structure easily
*/
hose->indirect_type = port->index;
/* Get bus range */
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Because mapping the config space is expensive (1M per bus), we
 * limit how many buses we support. In the long run, we could replace
 * that with something akin to kmap_atomic instead. We set aside 1 bus
 * for the host itself too.
 */
busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
if (busses > MAX_PCIE_BUS_MAPPED) {
busses = MAX_PCIE_BUS_MAPPED;
hose->last_busno = hose->first_busno + busses;
}
if (!port->endpoint) {
/* Only map the external config space in cfg_data for
* PCIe root-complexes. External space is 1M per bus
*/
cfg_data = ioremap(port->cfg_space.start +
(hose->first_busno + 1) * 0x100000,
busses * 0x100000);
if (cfg_data == NULL) {
printk(KERN_ERR "%s: Can't map external config space !",
port->node->full_name);
goto fail;
}
hose->cfg_data = cfg_data;
}
/* Always map the host config space in cfg_addr.
* Internal space is 4K
*/
mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
if (mbase == NULL) {
printk(KERN_ERR "%s: Can't map internal config space !",
port->node->full_name);
goto fail;
}
hose->cfg_addr = mbase;
pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
hose->first_busno, hose->last_busno);
pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
hose->cfg_addr, hose->cfg_data);
/* Setup config space */
hose->ops = &ppc4xx_pciex_pci_ops;
port->hose = hose;
mbase = (void __iomem *)hose->cfg_addr;
if (!port->endpoint) {
/*
* Set bus numbers on our root port
*/
out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
}
/*
* OMRs are already reset, also disable PIMs
*/
out_le32(mbase + PECFG_PIMEN, 0);
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, port->node, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pciex_POMs(port, hose, mbase);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
/* The root complex doesn't show up if we don't set some vendor
 * and device IDs into it. The defaults below are the same bogus
 * ones that the initial code in arch/ppc had. This can be
 * overridden by setting the "vendor-id"/"device-id" properties
 * in the pciex node.
 */
/* Get the (optional) vendor-/device-id from the device-tree */
pval = of_get_property(port->node, "vendor-id", NULL);
if (pval) {
val = *pval;
} else {
if (!port->endpoint)
val = 0xaaa0 + port->index;
else
val = 0xeee0 + port->index;
}
out_le16(mbase + 0x200, val);
pval = of_get_property(port->node, "device-id", NULL);
if (pval) {
val = *pval;
} else {
if (!port->endpoint)
val = 0xbed0 + port->index;
else
val = 0xfed0 + port->index;
}
out_le16(mbase + 0x202, val);
if (!port->endpoint) {
/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
out_le32(mbase + 0x208, 0x06040001);
printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
port->index);
} else {
/* Set Class Code to Processor/PPC */
out_le32(mbase + 0x208, 0x0b200001);
printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
port->index);
}
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (cfg_data)
iounmap(cfg_data);
if (mbase)
iounmap(mbase);
}
static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
{
struct ppc4xx_pciex_port *port;
const u32 *pval;
int portno;
unsigned int dcrs;
const char *val;
/* First, proceed to core initialization as we assume there's
* only one PCIe core in the system
*/
if (ppc4xx_pciex_check_core_init(np))
return;
/* Get the port number from the device-tree */
pval = of_get_property(np, "port", NULL);
if (pval == NULL) {
printk(KERN_ERR "PCIE: Can't find port number for %s\n",
np->full_name);
return;
}
portno = *pval;
if (portno >= ppc4xx_pciex_port_count) {
printk(KERN_ERR "PCIE: port number out of range for %s\n",
np->full_name);
return;
}
port = &ppc4xx_pciex_ports[portno];
port->index = portno;
/*
* Check if device is enabled
*/
if (!of_device_is_available(np)) {
printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
return;
}
port->node = of_node_get(np);
pval = of_get_property(np, "sdr-base", NULL);
if (pval == NULL) {
printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
np->full_name);
return;
}
port->sdr_base = *pval;
/* Check if the device_type property is set to "pci" or "pci-endpoint".
 * Depending on this setting, the PCIe port will be configured
 * as a root complex or as an endpoint.
 */
val = of_get_property(port->node, "device_type", NULL);
/* Guard against a missing property before calling strcmp() */
if (val && !strcmp(val, "pci-endpoint")) {
port->endpoint = 1;
} else if (val && !strcmp(val, "pci")) {
port->endpoint = 0;
} else {
printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
np->full_name);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &port->cfg_space)) {
printk(KERN_ERR "%s: Can't get PCI-E config space !",
np->full_name);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 1, &port->utl_regs)) {
printk(KERN_ERR "%s: Can't get UTL register base !",
np->full_name);
return;
}
/* Map DCRs */
dcrs = dcr_resource_start(np, 0);
if (dcrs == 0) {
printk(KERN_ERR "%s: Can't get DCR register base !",
np->full_name);
return;
}
port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
/* Initialize the port specific registers */
if (ppc4xx_pciex_port_init(port)) {
printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
return;
}
/* Setup the linux hose data structure */
ppc4xx_pciex_port_setup_hose(port);
}
#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
static int __init ppc4xx_pci_find_bridges(void)
{
struct device_node *np;
ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
#ifdef CONFIG_PPC4xx_PCI_EXPRESS
for_each_compatible_node(np, NULL, "ibm,plb-pciex")
ppc4xx_probe_pciex_bridge(np);
#endif
for_each_compatible_node(np, NULL, "ibm,plb-pcix")
ppc4xx_probe_pcix_bridge(np);
for_each_compatible_node(np, NULL, "ibm,plb-pci")
ppc4xx_probe_pci_bridge(np);
return 0;
}
arch_initcall(ppc4xx_pci_find_bridges);
| gpl-2.0 |
burstlam/zte-kernel-gb | net/netfilter/xt_statistic.c | 764 | 2486 | /*
* Copyright (c) 2006 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on ipt_random and ipt_nth by Fabrice MARIE <fabrice@netfilter.org>.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netfilter/xt_statistic.h>
#include <linux/netfilter/x_tables.h>
struct xt_statistic_priv {
uint32_t count;
};
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
MODULE_ALIAS("ipt_statistic");
MODULE_ALIAS("ip6t_statistic");
static DEFINE_SPINLOCK(nth_lock);
static bool
statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
bool ret = info->flags & XT_STATISTIC_INVERT;
switch (info->mode) {
case XT_STATISTIC_MODE_RANDOM:
if ((net_random() & 0x7FFFFFFF) < info->u.random.probability)
ret = !ret;
break;
case XT_STATISTIC_MODE_NTH:
spin_lock_bh(&nth_lock);
if (info->master->count++ == info->u.nth.every) {
info->master->count = 0;
ret = !ret;
}
spin_unlock_bh(&nth_lock);
break;
}
return ret;
}
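/*
 * Illustrative sketch, not part of the original module: in nth mode the
 * counter above wraps after reaching "every", so the verdict is flipped
 * once every (every + 1) packets.  A minimal user-space analogue of
 * that counter (the names here are hypothetical):
 *
 *	static bool nth_hit(uint32_t *count, uint32_t every)
 *	{
 *		if ((*count)++ == every) {
 *			*count = 0;
 *			return true;
 *		}
 *		return false;
 *	}
 *
 * With a hypothetical every value of 2 this returns true for packets
 * 3, 6, 9, ..., matching the kernel-side behaviour.
 */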
static int statistic_mt_check(const struct xt_mtchk_param *par)
{
struct xt_statistic_info *info = par->matchinfo;
if (info->mode > XT_STATISTIC_MODE_MAX ||
info->flags & ~XT_STATISTIC_MASK)
return -EINVAL;
info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
if (info->master == NULL)
return -ENOMEM;
info->master->count = info->u.nth.count;
return 0;
}
static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
{
const struct xt_statistic_info *info = par->matchinfo;
kfree(info->master);
}
static struct xt_match xt_statistic_mt_reg __read_mostly = {
.name = "statistic",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = statistic_mt,
.checkentry = statistic_mt_check,
.destroy = statistic_mt_destroy,
.matchsize = sizeof(struct xt_statistic_info),
.me = THIS_MODULE,
};
static int __init statistic_mt_init(void)
{
return xt_register_match(&xt_statistic_mt_reg);
}
static void __exit statistic_mt_exit(void)
{
xt_unregister_match(&xt_statistic_mt_reg);
}
module_init(statistic_mt_init);
module_exit(statistic_mt_exit);
| gpl-2.0 |
Shkerzy/alcatel_brandy_kernel | drivers/staging/memrar/memrar_handler.c | 764 | 25559 | /*
* memrar_handler 1.0: An Intel restricted access region handler device
*
* Copyright (C) 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General
* Public License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free
* Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* The full GNU General Public License is included in this
* distribution in the file called COPYING.
*
* -------------------------------------------------------------------
*
* Moorestown restricted access regions (RAR) provide isolated
* areas of main memory that are only accessible by authorized
* devices.
*
* The Intel Moorestown RAR handler module exposes a kernel space
* RAR memory management mechanism. It is essentially a
* RAR-specific allocator.
*
* Besides providing RAR buffer management, the RAR handler also
* behaves in many ways like an OS virtual memory manager. For
* example, the RAR "handles" created by the RAR handler are
* analogous to user space virtual addresses.
*
* RAR memory itself is never accessed directly by the RAR
* handler.
*/
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include "../rar_register/rar_register.h"
#include "memrar.h"
#include "memrar_allocator.h"
#define MEMRAR_VER "1.0"
/*
* Moorestown supports three restricted access regions.
*
* We only care about the first two, video and audio. The third,
* reserved for Chaabi and the P-unit, will be handled by their
* respective drivers.
*/
#define MRST_NUM_RAR 2
/* ---------------- -------------------- ------------------- */
/**
* struct memrar_buffer_info - struct that keeps track of all RAR buffers
* @list: Linked list of memrar_buffer_info objects.
* @buffer: Core RAR buffer information.
* @refcount: Reference count.
* @owner: File handle corresponding to process that reserved the
* block of memory in RAR. This will be zero for buffers
* allocated by other drivers instead of by a user space
* process.
*
* This structure encapsulates a link list of RAR buffers, as well as
* other characteristics specific to a given list node, such as the
* reference count on the corresponding RAR buffer.
*/
struct memrar_buffer_info {
struct list_head list;
struct RAR_buffer buffer;
struct kref refcount;
struct file *owner;
};
/**
* struct memrar_rar_info - characteristics of a given RAR
* @base: Base bus address of the RAR.
* @length: Length of the RAR.
* @iobase: Virtual address of RAR mapped into kernel.
* @allocator: Allocator associated with the RAR. Note the allocator
* "capacity" may be smaller than the RAR length if the
* length is not a multiple of the configured allocator
* block size.
* @buffers: Table that keeps track of all reserved RAR buffers.
* @lock: Lock used to synchronize access to RAR-specific data
* structures.
*
* Each RAR has an associated memrar_rar_info structure that describes
* where in memory the RAR is located, how large it is, and a list of
* reserved RAR buffers inside that RAR. Each RAR also has a mutex
* associated with it to reduce lock contention when operations on
* multiple RARs are performed in parallel.
*/
struct memrar_rar_info {
dma_addr_t base;
unsigned long length;
void __iomem *iobase;
struct memrar_allocator *allocator;
struct memrar_buffer_info buffers;
struct mutex lock;
int allocated; /* True if we own this RAR */
};
/*
* Array of RAR characteristics.
*/
static struct memrar_rar_info memrars[MRST_NUM_RAR];
/* ---------------- -------------------- ------------------- */
/* Validate RAR type. */
static inline int memrar_is_valid_rar_type(u32 type)
{
return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
}
/* Check if an address/handle falls within the given RAR memory range. */
static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
u32 vaddr)
{
unsigned long const iobase = (unsigned long) (rar->iobase);
return (vaddr >= iobase && vaddr < iobase + rar->length);
}
/* Retrieve RAR information associated with the given handle. */
static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
{
int i;
for (i = 0; i < MRST_NUM_RAR; ++i) {
struct memrar_rar_info * const rar = &memrars[i];
if (memrar_handle_in_range(rar, vaddr))
return rar;
}
return NULL;
}
/**
* memrar_get_bus_address - handle to bus address
*
* Retrieve bus address from given handle.
*
* Returns address corresponding to given handle. Zero if handle is
* invalid.
*/
static dma_addr_t memrar_get_bus_address(
struct memrar_rar_info *rar,
u32 vaddr)
{
unsigned long const iobase = (unsigned long) (rar->iobase);
if (!memrar_handle_in_range(rar, vaddr))
return 0;
/*
* An assumption is made that the virtual address offset is
* the same as the bus address offset, at least based on the
* way this driver is implemented. For example, vaddr + 2 ==
* baddr + 2.
*
* @todo Is that a valid assumption?
*/
return rar->base + (vaddr - iobase);
}
/**
* memrar_get_physical_address - handle to physical address
*
* Retrieve physical address from given handle.
*
* Returns address corresponding to given handle. Zero if handle is
* invalid.
*/
static dma_addr_t memrar_get_physical_address(
struct memrar_rar_info *rar,
u32 vaddr)
{
/*
* @todo This assumes that the bus address and physical
* address are the same. That is true for Moorestown
* but not necessarily on other platforms. This
* deficiency should be addressed at some point.
*/
return memrar_get_bus_address(rar, vaddr);
}
/**
* memrar_release_block_i - release a block to the pool
* @ref: kref of the block being released
*
* Core block release code. A node has hit zero references so can
* be released and the lists must be updated.
*
* Note: This code removes the node from a list. Make sure any list
* iteration is performed using list_for_each_safe().
*/
static void memrar_release_block_i(struct kref *ref)
{
/*
* Last reference is being released. Remove from the table,
* and reclaim resources.
*/
struct memrar_buffer_info * const node =
container_of(ref, struct memrar_buffer_info, refcount);
struct RAR_block_info * const user_info =
&node->buffer.info;
struct memrar_allocator * const allocator =
memrars[user_info->type].allocator;
list_del(&node->list);
memrar_allocator_free(allocator, user_info->handle);
kfree(node);
}
/**
* memrar_init_rar_resources - configure a RAR
* @rarnum: rar that has been allocated
* @devname: name of our device
*
* Initialize RAR parameters, such as bus addresses, etc and make
* the resource accessible.
*/
static int memrar_init_rar_resources(int rarnum, char const *devname)
{
/* ---- Sanity Checks ----
* 1. RAR bus addresses in both Lincroft and Langwell RAR
* registers should be the same.
* a. There's no way we can do this through IA.
*
* 2. Secure device ID in Langwell RAR registers should be set
* appropriately, e.g. only LPE DMA for the audio RAR, and
* security for the other Langwell based RAR registers.
* a. There's no way we can do this through IA.
*
* 3. Audio and video RAR registers and RAR access should be
* locked down. If not, enable RAR access control. Except
* for debugging purposes, there is no reason for them to
* be unlocked.
* a. We can only do this for the Lincroft (IA) side.
*
* @todo Should the RAR handler driver even be aware of audio
* and video RAR settings?
*/
/*
* RAR buffer block size.
*
* We choose it to be the size of a page to simplify the
* /dev/memrar mmap() implementation and usage. Otherwise
* paging is not involved once an RAR is locked down.
*/
static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;
dma_addr_t low, high;
struct memrar_rar_info * const rar = &memrars[rarnum];
BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));
BUG_ON(!memrar_is_valid_rar_type(rarnum));
BUG_ON(rar->allocated);
mutex_init(&rar->lock);
/*
* Initialize the process table before we reach any
* code that exits on failure, since the finalization
* code requires an initialized list.
*/
INIT_LIST_HEAD(&rar->buffers.list);
if (rar_get_address(rarnum, &low, &high) != 0)
/* No RAR is available. */
return -ENODEV;
if (low == 0 || high == 0) {
rar->base = 0;
rar->length = 0;
rar->iobase = NULL;
rar->allocator = NULL;
return -ENOSPC;
}
/*
* @todo Verify that LNC and LNW RAR register contents
* (addresses, security, etc) are compatible and
* consistent.
*/
rar->length = high - low + 1;
/* Claim RAR memory as our own. */
if (request_mem_region(low, rar->length, devname) == NULL) {
rar->length = 0;
pr_err("%s: Unable to claim RAR[%d] memory.\n", devname, rarnum);
pr_err("%s: RAR[%d] disabled.\n", devname, rarnum);
return -EBUSY;
}
rar->base = low;
/*
* Now map it into the kernel address space.
*
* Note that the RAR memory may only be accessed by IA
* when debugging. Otherwise attempts to access the
* RAR memory when it is locked down will result in
* behavior similar to writing to /dev/null and
* reading from /dev/zero. This behavior is enforced
* by the hardware. Even if we don't access the
* memory, mapping it into the kernel provides us with
* a convenient RAR handle to bus address mapping.
*/
rar->iobase = ioremap_nocache(rar->base, rar->length);
if (rar->iobase == NULL) {
pr_err("%s: Unable to map RAR memory.\n", devname);
release_mem_region(low, rar->length);
return -ENOMEM;
}
/* Initialize corresponding memory allocator. */
rar->allocator = memrar_create_allocator((unsigned long) rar->iobase,
rar->length, RAR_BLOCK_SIZE);
if (rar->allocator == NULL) {
iounmap(rar->iobase);
release_mem_region(low, rar->length);
return -ENOMEM;
}
pr_info("%s: BRAR[%d] bus address range = [0x%lx, 0x%lx]\n",
devname, rarnum, (unsigned long) low, (unsigned long) high);
pr_info("%s: BRAR[%d] size = %zu KiB\n",
devname, rarnum, rar->allocator->capacity / 1024);
rar->allocated = 1;
return 0;
}
/**
* memrar_fini_rar_resources - free up RAR resources
*
* Finalize RAR resources. Free up the resource tables, hand the memory
* back to the kernel, unmap the device and release the address space.
*/
static void memrar_fini_rar_resources(void)
{
int z;
struct memrar_buffer_info *pos;
struct memrar_buffer_info *tmp;
/*
* @todo Do we need to hold a lock at this point in time?
* (module initialization failure or exit?)
*/
for (z = MRST_NUM_RAR; z-- != 0; ) {
struct memrar_rar_info * const rar = &memrars[z];
if (!rar->allocated)
continue;
/* Clean up remaining resources. */
list_for_each_entry_safe(pos,
tmp,
&rar->buffers.list,
list) {
kref_put(&pos->refcount, memrar_release_block_i);
}
memrar_destroy_allocator(rar->allocator);
rar->allocator = NULL;
iounmap(rar->iobase);
release_mem_region(rar->base, rar->length);
rar->iobase = NULL;
rar->base = 0;
rar->length = 0;
unregister_rar(z);
}
}
/**
* memrar_reserve_block - handle an allocation request
* @request: block being requested
* @filp: owner it is tied to
*
* Allocate a block of the requested RAR. If successful return the
* request object filled in and zero, if not report an error code
*/
static long memrar_reserve_block(struct RAR_buffer *request,
struct file *filp)
{
struct RAR_block_info * const rinfo = &request->info;
struct RAR_buffer *buffer;
struct memrar_buffer_info *buffer_info;
u32 handle;
struct memrar_rar_info *rar = NULL;
/* Prevent array overflow. */
if (!memrar_is_valid_rar_type(rinfo->type))
return -EINVAL;
rar = &memrars[rinfo->type];
if (!rar->allocated)
return -ENODEV;
/* Reserve memory in RAR. */
handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
if (handle == 0)
return -ENOMEM;
buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);
if (buffer_info == NULL) {
memrar_allocator_free(rar->allocator, handle);
return -ENOMEM;
}
buffer = &buffer_info->buffer;
buffer->info.type = rinfo->type;
buffer->info.size = rinfo->size;
/* Memory handle corresponding to the bus address. */
buffer->info.handle = handle;
buffer->bus_address = memrar_get_bus_address(rar, handle);
/*
* Keep track of owner so that we can later cleanup if
* necessary.
*/
buffer_info->owner = filp;
kref_init(&buffer_info->refcount);
mutex_lock(&rar->lock);
list_add(&buffer_info->list, &rar->buffers.list);
mutex_unlock(&rar->lock);
rinfo->handle = buffer->info.handle;
request->bus_address = buffer->bus_address;
return 0;
}
/**
* memrar_release_block - release a RAR block
* @addr: address in RAR space
*
* Release a previously allocated block. Releases act on complete
* blocks, partially freeing a block is not supported
*/
static long memrar_release_block(u32 addr)
{
struct memrar_buffer_info *pos;
struct memrar_buffer_info *tmp;
struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
long result = -EINVAL;
if (rar == NULL)
return -ENOENT;
mutex_lock(&rar->lock);
/*
* Iterate through the buffer list to find the corresponding
* buffer to be released.
*/
list_for_each_entry_safe(pos,
tmp,
&rar->buffers.list,
list) {
struct RAR_block_info * const info =
&pos->buffer.info;
/*
* Take into account handle offsets that may have been
* added to the base handle, such as in the following
* scenario:
*
* u32 handle = base + offset;
* rar_handle_to_bus(handle);
* rar_release(handle);
*/
if (addr >= info->handle
&& addr < (info->handle + info->size)
&& memrar_is_valid_rar_type(info->type)) {
kref_put(&pos->refcount, memrar_release_block_i);
result = 0;
break;
}
}
mutex_unlock(&rar->lock);
return result;
}
/**
* memrar_get_stat - read statistics for a RAR
* @r: statistics to be filled in
*
* Returns the statistics data for the RAR, or an error code if
* the request cannot be completed
*/
static long memrar_get_stat(struct RAR_stat *r)
{
struct memrar_allocator *allocator;
if (!memrar_is_valid_rar_type(r->type))
return -EINVAL;
if (!memrars[r->type].allocated)
return -ENODEV;
allocator = memrars[r->type].allocator;
BUG_ON(allocator == NULL);
/*
* Allocator capacity doesn't change over time. No
* need to synchronize.
*/
r->capacity = allocator->capacity;
mutex_lock(&allocator->lock);
r->largest_block_size = allocator->largest_free_area;
mutex_unlock(&allocator->lock);
return 0;
}
/**
* memrar_ioctl - ioctl callback
* @filp: file issuing the request
* @cmd: command
* @arg: pointer to control information
*
* Perform one of the ioctls supported by the memrar device
*/
static long memrar_ioctl(struct file *filp,
unsigned int cmd,
unsigned long arg)
{
void __user *argp = (void __user *)arg;
long result = 0;
struct RAR_buffer buffer;
struct RAR_block_info * const request = &buffer.info;
struct RAR_stat rar_info;
u32 rar_handle;
switch (cmd) {
case RAR_HANDLER_RESERVE:
if (copy_from_user(request,
argp,
sizeof(*request)))
return -EFAULT;
result = memrar_reserve_block(&buffer, filp);
if (result != 0)
return result;
return copy_to_user(argp, request, sizeof(*request));
case RAR_HANDLER_RELEASE:
if (copy_from_user(&rar_handle,
argp,
sizeof(rar_handle)))
return -EFAULT;
return memrar_release_block(rar_handle);
case RAR_HANDLER_STAT:
if (copy_from_user(&rar_info,
argp,
sizeof(rar_info)))
return -EFAULT;
/*
* Populate the RAR_stat structure based on the RAR
* type given by the user
*/
if (memrar_get_stat(&rar_info) != 0)
return -EINVAL;
/*
* @todo Do we need to verify destination pointer
* "argp" is non-zero? Is that already done by
* copy_to_user()?
*/
return copy_to_user(argp,
&rar_info,
sizeof(rar_info)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
return 0;
}
/**
* memrar_mmap - mmap helper for debugging
* @filp: handle doing the mapping
* @vma: memory area
*
* Support the mmap operation on the RAR space for debugging systems
* when the memory is not locked down.
*/
static int memrar_mmap(struct file *filp, struct vm_area_struct *vma)
{
/*
* This mmap() implementation is predominantly useful for
* debugging since the CPU will be prevented from accessing
* RAR memory by the hardware when RAR is properly locked
* down.
*
* In order for this implementation to be useful RAR memory
* must not be locked down. However, we only want to do
* that when debugging. DO NOT leave RAR memory unlocked in a
* deployed device that utilizes RAR.
*/
size_t const size = vma->vm_end - vma->vm_start;
/* Users pass the RAR handle as the mmap() offset parameter. */
unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT;
struct memrar_rar_info * const rar = memrar_get_rar_info(handle);
unsigned long pfn;
/* Only allow privileged apps to go poking around this way */
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
/* Invalid RAR handle or size passed to mmap(). */
if (rar == NULL
|| handle == 0
|| size > (handle - (unsigned long) rar->iobase))
return -EINVAL;
/*
* Retrieve physical address corresponding to the RAR handle,
* and convert it to a page frame.
*/
pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT;
pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n",
handle,
handle + size);
/*
* Map RAR memory into user space. This is really only useful
* for debugging purposes since the memory won't be
* accessible, i.e. reads return zero and writes are ignored,
* when RAR access control is enabled.
*/
if (remap_pfn_range(vma,
vma->vm_start,
pfn,
size,
vma->vm_page_prot))
return -EAGAIN;
/* vma->vm_ops = &memrar_mem_ops; */
return 0;
}
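/*
 * Illustrative user-space sketch, not part of this driver: the handle
 * obtained from the RAR_HANDLER_RESERVE ioctl is passed back as the
 * mmap() offset, which is what the vm_pgoff arithmetic above expects.
 * The names fd, size and handle below are placeholders:
 *
 *	int fd = open("/dev/memrar", O_RDWR);
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, handle);
 */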
/**
* memrar_open - device open method
* @inode: inode to open
* @filp: file handle
*
* As we support multiple arbitrary opens, there is no real work to
* be done here.
*/
static int memrar_open(struct inode *inode, struct file *filp)
{
nonseekable_open(inode, filp);
return 0;
}
/**
* memrar_release - close method for the misc device
* @inode: inode of device
* @filp: handle that is going away
*
* Free up all the regions that belong to this file handle. We use
* the handle as a natural Linux style 'lifetime' indicator and to
* ensure resources are not leaked when their owner explodes in an
* unplanned fashion.
*/
static int memrar_release(struct inode *inode, struct file *filp)
{
/* Free all regions associated with the given file handle. */
struct memrar_buffer_info *pos;
struct memrar_buffer_info *tmp;
int z;
for (z = 0; z != MRST_NUM_RAR; ++z) {
struct memrar_rar_info * const rar = &memrars[z];
mutex_lock(&rar->lock);
list_for_each_entry_safe(pos,
tmp,
&rar->buffers.list,
list) {
if (filp == pos->owner)
kref_put(&pos->refcount,
memrar_release_block_i);
}
mutex_unlock(&rar->lock);
}
return 0;
}
/**
* rar_reserve - reserve RAR memory
* @buffers: buffers to reserve
* @count: number wanted
*
* Reserve a series of buffers in the RAR space. Returns the number of
* buffers successfully allocated
*/
size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
{
struct RAR_buffer * const end =
(buffers == NULL ? buffers : buffers + count);
struct RAR_buffer *i;
size_t reserve_count = 0;
for (i = buffers; i != end; ++i) {
if (memrar_reserve_block(i, NULL) == 0)
++reserve_count;
else
i->bus_address = 0;
}
return reserve_count;
}
EXPORT_SYMBOL(rar_reserve);
/**
* rar_release - return RAR buffers
* @buffers: buffers to release
* @count: number of buffers to release
*
* Return a set of buffers to the RAR pool
*/
size_t rar_release(struct RAR_buffer *buffers, size_t count)
{
struct RAR_buffer * const end =
(buffers == NULL ? buffers : buffers + count);
struct RAR_buffer *i;
size_t release_count = 0;
for (i = buffers; i != end; ++i) {
u32 * const handle = &i->info.handle;
if (memrar_release_block(*handle) == 0) {
/*
* @todo We assume we should do this each time
* the ref count is decremented. Should
* we instead only do this when the ref
* count has dropped to zero, and the
* buffer has been completely
* released/unmapped?
*/
*handle = 0;
++release_count;
}
}
return release_count;
}
EXPORT_SYMBOL(rar_release);
/**
* rar_handle_to_bus - RAR to bus address
* @buffers: RAR buffer structure
* @count: number of buffers to convert
*
* Turn a list of RAR handle mappings into actual bus addresses. Note
* that when the device is locked down the bus addresses in question
* are not CPU accessible.
*/
size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count)
{
struct RAR_buffer * const end =
(buffers == NULL ? buffers : buffers + count);
struct RAR_buffer *i;
struct memrar_buffer_info *pos;
size_t conversion_count = 0;
/*
* Find all bus addresses corresponding to the given handles.
*
* @todo Not liking this nested loop. Optimize.
*/
for (i = buffers; i != end; ++i) {
struct memrar_rar_info * const rar =
memrar_get_rar_info(i->info.handle);
/*
* Check if we have a bogus handle, and then continue
* with remaining buffers.
*/
if (rar == NULL) {
i->bus_address = 0;
continue;
}
mutex_lock(&rar->lock);
list_for_each_entry(pos, &rar->buffers.list, list) {
struct RAR_block_info * const user_info =
&pos->buffer.info;
/*
* Take into account handle offsets that may
* have been added to the base handle, such as
* in the following scenario:
*
* u32 handle = base + offset;
* rar_handle_to_bus(handle);
*/
if (i->info.handle >= user_info->handle
&& i->info.handle < (user_info->handle
+ user_info->size)) {
u32 const offset =
i->info.handle - user_info->handle;
i->info.type = user_info->type;
i->info.size = user_info->size - offset;
i->bus_address =
pos->buffer.bus_address
+ offset;
/* Increment the reference count. */
kref_get(&pos->refcount);
++conversion_count;
break;
} else {
i->bus_address = 0;
}
}
mutex_unlock(&rar->lock);
}
return conversion_count;
}
EXPORT_SYMBOL(rar_handle_to_bus);
static const struct file_operations memrar_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = memrar_ioctl,
.mmap = memrar_mmap,
.open = memrar_open,
.release = memrar_release,
};
static struct miscdevice memrar_miscdev = {
.minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */
.name = "memrar", /* /dev/memrar */
.fops = &memrar_fops
};
static char const banner[] __initdata =
KERN_INFO
"Intel RAR Handler: " MEMRAR_VER " initialized.\n";
/**
* memrar_registration_callback - RAR obtained
* @rar: RAR number
*
* We have been granted ownership of the RAR. Add it to our memory
* management tables
*/
static int memrar_registration_callback(unsigned long rar)
{
/*
* We initialize the RAR parameters early on so that we can
* discontinue memrar device initialization and registration
* if suitably configured RARs are not available.
*/
return memrar_init_rar_resources(rar, memrar_miscdev.name);
}
/**
* memrar_init - initialise RAR support
*
* Initialise support for RAR handlers. This may get loaded before
* the RAR support is activated, but the callbacks on the registration
* will handle that situation for us anyway.
*/
static int __init memrar_init(void)
{
int err;
printk(banner);
err = misc_register(&memrar_miscdev);
if (err)
return err;
/* Now claim the two RARs we want */
err = register_rar(0, memrar_registration_callback, 0);
if (err)
goto fail;
err = register_rar(1, memrar_registration_callback, 1);
if (err == 0)
return 0;
/* It is possible that RAR 0 registered and allocated resources and
   then RAR 1 failed, so do a full resource free */
memrar_fini_rar_resources();
fail:
misc_deregister(&memrar_miscdev);
return err;
}
/**
* memrar_exit - unregister and unload
*
* Unregister the device and then unload any mappings and release
* the RAR resources
*/
static void __exit memrar_exit(void)
{
misc_deregister(&memrar_miscdev);
memrar_fini_rar_resources();
}
module_init(memrar_init);
module_exit(memrar_exit);
MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>");
MODULE_DESCRIPTION("Intel Restricted Access Region Handler");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEMRAR_VER);
/*
Local Variables:
c-file-style: "linux"
End:
*/
| gpl-2.0 |
burstlam/zte-blade-35 | net/sunrpc/auth_gss/gss_krb5_mech.c | 764 | 20001 | /*
* linux/net/sunrpc/gss_krb5_mech.c
*
* Copyright (c) 2001-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
* J. Bruce Fields <bfields@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <linux/crypto.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
static struct gss_api_mech gss_kerberos_mech; /* forward declaration */
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
/*
* DES (All DES enctypes are mapped to the same gss functionality)
*/
{
.etype = ENCTYPE_DES_CBC_RAW,
.ctype = CKSUMTYPE_RSA_MD5,
.name = "des-cbc-crc",
.encrypt_name = "cbc(des)",
.cksum_name = "md5",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = NULL,
.signalg = SGN_ALG_DES_MAC_MD5,
.sealalg = SEAL_ALG_DES,
.keybytes = 7,
.keylength = 8,
.blocksize = 8,
.conflen = 8,
.cksumlength = 8,
.keyed_cksum = 0,
},
/*
* RC4-HMAC
*/
{
.etype = ENCTYPE_ARCFOUR_HMAC,
.ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR,
.name = "rc4-hmac",
.encrypt_name = "ecb(arc4)",
.cksum_name = "hmac(md5)",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = NULL,
.signalg = SGN_ALG_HMAC_MD5,
.sealalg = SEAL_ALG_MICROSOFT_RC4,
.keybytes = 16,
.keylength = 16,
.blocksize = 1,
.conflen = 8,
.cksumlength = 8,
.keyed_cksum = 1,
},
/*
* 3DES
*/
{
.etype = ENCTYPE_DES3_CBC_RAW,
.ctype = CKSUMTYPE_HMAC_SHA1_DES3,
.name = "des3-hmac-sha1",
.encrypt_name = "cbc(des3_ede)",
.cksum_name = "hmac(sha1)",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = gss_krb5_des3_make_key,
.signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
.sealalg = SEAL_ALG_DES3KD,
.keybytes = 21,
.keylength = 24,
.blocksize = 8,
.conflen = 8,
.cksumlength = 20,
.keyed_cksum = 1,
},
/*
* AES128
*/
{
.etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
.ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
.name = "aes128-cts",
.encrypt_name = "cts(cbc(aes))",
.cksum_name = "hmac(sha1)",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = gss_krb5_aes_make_key,
.encrypt_v2 = gss_krb5_aes_encrypt,
.decrypt_v2 = gss_krb5_aes_decrypt,
.signalg = -1,
.sealalg = -1,
.keybytes = 16,
.keylength = 16,
.blocksize = 16,
.conflen = 16,
.cksumlength = 12,
.keyed_cksum = 1,
},
/*
* AES256
*/
{
.etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
.ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
.name = "aes256-cts",
.encrypt_name = "cts(cbc(aes))",
.cksum_name = "hmac(sha1)",
.encrypt = krb5_encrypt,
.decrypt = krb5_decrypt,
.mk_key = gss_krb5_aes_make_key,
.encrypt_v2 = gss_krb5_aes_encrypt,
.decrypt_v2 = gss_krb5_aes_decrypt,
.signalg = -1,
.sealalg = -1,
.keybytes = 32,
.keylength = 32,
.blocksize = 16,
.conflen = 16,
.cksumlength = 12,
.keyed_cksum = 1,
},
};
static const int num_supported_enctypes =
ARRAY_SIZE(supported_gss_krb5_enctypes);
static int
supported_gss_krb5_enctype(int etype)
{
int i;
for (i = 0; i < num_supported_enctypes; i++)
if (supported_gss_krb5_enctypes[i].etype == etype)
return 1;
return 0;
}
static const struct gss_krb5_enctype *
get_gss_krb5_enctype(int etype)
{
int i;
for (i = 0; i < num_supported_enctypes; i++)
if (supported_gss_krb5_enctypes[i].etype == etype)
return &supported_gss_krb5_enctypes[i];
return NULL;
}
static const void *
simple_get_bytes(const void *p, const void *end, void *res, int len)
{
const void *q = (const void *)((const char *)p + len);
if (unlikely(q > end || q < p))
return ERR_PTR(-EFAULT);
memcpy(res, p, len);
return q;
}
static const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
{
const void *q;
unsigned int len;
p = simple_get_bytes(p, end, &len, sizeof(len));
if (IS_ERR(p))
return p;
q = (const void *)((const char *)p + len);
if (unlikely(q > end || q < p))
return ERR_PTR(-EFAULT);
res->data = kmemdup(p, len, GFP_NOFS);
if (unlikely(res->data == NULL))
return ERR_PTR(-ENOMEM);
res->len = len;
return q;
}
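/*
 * Illustrative note, not part of the original code: the two helpers
 * above parse the downcall buffer as a host-order unsigned int length
 * immediately followed by that many bytes of data.  A hypothetical
 * 3-byte netobj "abc" would therefore be laid out as:
 *
 *	03 00 00 00 61 62 63	(on a little-endian machine)
 */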
static inline const void *
get_key(const void *p, const void *end,
struct krb5_ctx *ctx, struct crypto_blkcipher **res)
{
struct xdr_netobj key;
int alg;
p = simple_get_bytes(p, end, &alg, sizeof(alg));
if (IS_ERR(p))
goto out_err;
switch (alg) {
case ENCTYPE_DES_CBC_CRC:
case ENCTYPE_DES_CBC_MD4:
case ENCTYPE_DES_CBC_MD5:
/* Map all these key types to ENCTYPE_DES_CBC_RAW */
alg = ENCTYPE_DES_CBC_RAW;
break;
}
if (!supported_gss_krb5_enctype(alg)) {
printk(KERN_WARNING "gss_kerberos_mech: unsupported "
"encryption key algorithm %d\n", alg);
goto out_err;
}
p = simple_get_netobj(p, end, &key);
if (IS_ERR(p))
goto out_err;
*res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(*res)) {
printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
*res = NULL;
goto out_err_free_key;
}
if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
goto out_err_free_tfm;
}
kfree(key.data);
return p;
out_err_free_tfm:
crypto_free_blkcipher(*res);
out_err_free_key:
kfree(key.data);
p = ERR_PTR(-EINVAL);
out_err:
return p;
}
static int
gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
{
int tmp;
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
goto out_err;
/* Old format supports only DES! Any other enctype uses new format */
ctx->enctype = ENCTYPE_DES_CBC_RAW;
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
if (ctx->gk5e == NULL)
goto out_err;
/* The downcall format was designed before we completely understood
 * the uses of the context fields; so it includes some fields that we
 * only give minimal sanity-checking, and some that we ignore
 * completely (like the next twenty bytes): */
if (unlikely(p + 20 > end || p + 20 < p))
goto out_err;
p += 20;
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
goto out_err;
if (tmp != SGN_ALG_DES_MAC_MD5) {
p = ERR_PTR(-ENOSYS);
goto out_err;
}
p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
if (IS_ERR(p))
goto out_err;
if (tmp != SEAL_ALG_DES) {
p = ERR_PTR(-ENOSYS);
goto out_err;
}
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
if (IS_ERR(p))
goto out_err;
p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
if (IS_ERR(p))
goto out_err;
p = simple_get_netobj(p, end, &ctx->mech_used);
if (IS_ERR(p))
goto out_err;
p = get_key(p, end, ctx, &ctx->enc);
if (IS_ERR(p))
goto out_err_free_mech;
p = get_key(p, end, ctx, &ctx->seq);
if (IS_ERR(p))
goto out_err_free_key1;
if (p != end) {
p = ERR_PTR(-EFAULT);
goto out_err_free_key2;
}
return 0;
out_err_free_key2:
crypto_free_blkcipher(ctx->seq);
out_err_free_key1:
crypto_free_blkcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
out_err:
return PTR_ERR(p);
}
struct crypto_blkcipher *
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
{
struct crypto_blkcipher *cp;
cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(cp)) {
dprintk("gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", cname);
return NULL;
}
if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
dprintk("gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", cname);
crypto_free_blkcipher(cp);
return NULL;
}
return cp;
}
static inline void
set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed)
{
cdata[0] = (usage>>24)&0xff;
cdata[1] = (usage>>16)&0xff;
cdata[2] = (usage>>8)&0xff;
cdata[3] = usage&0xff;
cdata[4] = seed;
}
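/*
 * Illustrative note, not part of the original code: set_cdata() packs
 * the 5-byte key-derivation constant as a big-endian 32-bit usage
 * value followed by the seed byte.  For a hypothetical usage of 0x17
 * and seed 0xaa the constant would be:
 *
 *	00 00 00 17 aa
 */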
static int
context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
struct xdr_netobj c, keyin, keyout;
u8 cdata[GSS_KRB5_K5CLENGTH];
u32 err;
c.len = GSS_KRB5_K5CLENGTH;
c.data = cdata;
keyin.data = ctx->Ksess;
keyin.len = ctx->gk5e->keylength;
keyout.len = ctx->gk5e->keylength;
/* seq uses the raw key */
ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
ctx->Ksess);
if (ctx->seq == NULL)
goto out_err;
ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name,
ctx->Ksess);
if (ctx->enc == NULL)
goto out_free_seq;
/* derive cksum */
set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);
keyout.data = ctx->cksum;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving cksum key\n",
__func__, err);
goto out_free_enc;
}
return 0;
out_free_enc:
crypto_free_blkcipher(ctx->enc);
out_free_seq:
crypto_free_blkcipher(ctx->seq);
out_err:
return -EINVAL;
}
/*
* Note that RC4 depends on deriving keys using the sequence
* number or the checksum of a token. Therefore, the final keys
* cannot be calculated until the token is being constructed!
*/
static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
int err;
dprintk("RPC: %s: entered\n", __func__);
/*
* derive cksum (aka Ksign) key
*/
hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac)) {
dprintk("%s: error %ld allocating hash '%s'\n",
__func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
err = PTR_ERR(hmac);
goto out_err;
}
err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
if (err)
goto out_err_free_hmac;
sg_init_table(sg, 1);
sg_set_buf(sg, sigkeyconstant, slen);
desc.tfm = hmac;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (err)
goto out_err_free_hmac;
err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
if (err)
goto out_err_free_hmac;
/*
* allocate hash, and blkciphers for data and seqnum encryption
*/
ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->enc)) {
err = PTR_ERR(ctx->enc);
goto out_err_free_hmac;
}
ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->seq)) {
crypto_free_blkcipher(ctx->enc);
err = PTR_ERR(ctx->seq);
goto out_err_free_hmac;
}
dprintk("RPC: %s: returning success\n", __func__);
err = 0;
out_err_free_hmac:
crypto_free_hash(hmac);
out_err:
dprintk("RPC: %s: returning %d\n", __func__, err);
return err;
}
static int
context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
struct xdr_netobj c, keyin, keyout;
u8 cdata[GSS_KRB5_K5CLENGTH];
u32 err;
c.len = GSS_KRB5_K5CLENGTH;
c.data = cdata;
keyin.data = ctx->Ksess;
keyin.len = ctx->gk5e->keylength;
keyout.len = ctx->gk5e->keylength;
/* initiator seal encryption */
set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
keyout.data = ctx->initiator_seal;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving initiator_seal key\n",
__func__, err);
goto out_err;
}
ctx->initiator_enc = context_v2_alloc_cipher(ctx,
ctx->gk5e->encrypt_name,
ctx->initiator_seal);
if (ctx->initiator_enc == NULL)
goto out_err;
/* acceptor seal encryption */
set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION);
keyout.data = ctx->acceptor_seal;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving acceptor_seal key\n",
__func__, err);
goto out_free_initiator_enc;
}
ctx->acceptor_enc = context_v2_alloc_cipher(ctx,
ctx->gk5e->encrypt_name,
ctx->acceptor_seal);
if (ctx->acceptor_enc == NULL)
goto out_free_initiator_enc;
/* initiator sign checksum */
set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
keyout.data = ctx->initiator_sign;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving initiator_sign key\n",
__func__, err);
goto out_free_acceptor_enc;
}
/* acceptor sign checksum */
set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM);
keyout.data = ctx->acceptor_sign;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving acceptor_sign key\n",
__func__, err);
goto out_free_acceptor_enc;
}
/* initiator seal integrity */
set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
keyout.data = ctx->initiator_integ;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving initiator_integ key\n",
__func__, err);
goto out_free_acceptor_enc;
}
/* acceptor seal integrity */
set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY);
keyout.data = ctx->acceptor_integ;
err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
if (err) {
dprintk("%s: Error %d deriving acceptor_integ key\n",
__func__, err);
goto out_free_acceptor_enc;
}
switch (ctx->enctype) {
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
ctx->initiator_enc_aux =
context_v2_alloc_cipher(ctx, "cbc(aes)",
ctx->initiator_seal);
if (ctx->initiator_enc_aux == NULL)
goto out_free_acceptor_enc;
ctx->acceptor_enc_aux =
context_v2_alloc_cipher(ctx, "cbc(aes)",
ctx->acceptor_seal);
if (ctx->acceptor_enc_aux == NULL) {
crypto_free_blkcipher(ctx->initiator_enc_aux);
goto out_free_acceptor_enc;
}
}
return 0;
out_free_acceptor_enc:
crypto_free_blkcipher(ctx->acceptor_enc);
out_free_initiator_enc:
crypto_free_blkcipher(ctx->initiator_enc);
out_err:
return -EINVAL;
}
static int
gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
gfp_t gfp_mask)
{
int keylen;
p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
if (IS_ERR(p))
goto out_err;
ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR;
p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
if (IS_ERR(p))
goto out_err;
p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
if (IS_ERR(p))
goto out_err;
/* set seq_send for use by "older" enctypes */
ctx->seq_send = ctx->seq_send64;
if (ctx->seq_send64 != ctx->seq_send) {
dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
(long unsigned)ctx->seq_send64, ctx->seq_send);
goto out_err;
}
p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
if (IS_ERR(p))
goto out_err;
/* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
ctx->enctype = ENCTYPE_DES3_CBC_RAW;
ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
if (ctx->gk5e == NULL) {
dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
ctx->enctype);
p = ERR_PTR(-EINVAL);
goto out_err;
}
keylen = ctx->gk5e->keylength;
p = simple_get_bytes(p, end, ctx->Ksess, keylen);
if (IS_ERR(p))
goto out_err;
if (p != end) {
p = ERR_PTR(-EINVAL);
goto out_err;
}
ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data,
gss_kerberos_mech.gm_oid.len, gfp_mask);
if (unlikely(ctx->mech_used.data == NULL)) {
p = ERR_PTR(-ENOMEM);
goto out_err;
}
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
switch (ctx->enctype) {
case ENCTYPE_DES3_CBC_RAW:
return context_derive_keys_des3(ctx, gfp_mask);
case ENCTYPE_ARCFOUR_HMAC:
return context_derive_keys_rc4(ctx);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return context_derive_keys_new(ctx, gfp_mask);
default:
return -EINVAL;
}
out_err:
return PTR_ERR(p);
}
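/*
 * A raw context blob of exactly 85 bytes is taken to be the original (v1)
 * Kerberos context format; any other length is parsed as the newer v2
 * layout by gss_import_v2_context() above.
 */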
static int
gss_import_sec_context_kerberos(const void *p, size_t len,
struct gss_ctx *ctx_id,
gfp_t gfp_mask)
{
const void *end = (const void *)((const char *)p + len);
struct krb5_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), gfp_mask);
if (ctx == NULL)
return -ENOMEM;
if (len == 85)
ret = gss_import_v1_context(p, end, ctx);
else
ret = gss_import_v2_context(p, end, ctx, gfp_mask);
if (ret == 0)
ctx_id->internal_ctx_id = ctx;
else
kfree(ctx);
dprintk("RPC: %s: returning %d\n", __func__, ret);
return ret;
}
static void
gss_delete_sec_context_kerberos(void *internal_ctx)
{
struct krb5_ctx *kctx = internal_ctx;
crypto_free_blkcipher(kctx->seq);
crypto_free_blkcipher(kctx->enc);
crypto_free_blkcipher(kctx->acceptor_enc);
crypto_free_blkcipher(kctx->initiator_enc);
crypto_free_blkcipher(kctx->acceptor_enc_aux);
crypto_free_blkcipher(kctx->initiator_enc_aux);
kfree(kctx->mech_used.data);
kfree(kctx);
}
static const struct gss_api_ops gss_kerberos_ops = {
.gss_import_sec_context = gss_import_sec_context_kerberos,
.gss_get_mic = gss_get_mic_kerberos,
.gss_verify_mic = gss_verify_mic_kerberos,
.gss_wrap = gss_wrap_kerberos,
.gss_unwrap = gss_unwrap_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
};
static struct pf_desc gss_kerberos_pfs[] = {
[0] = {
.pseudoflavor = RPC_AUTH_GSS_KRB5,
.service = RPC_GSS_SVC_NONE,
.name = "krb5",
},
[1] = {
.pseudoflavor = RPC_AUTH_GSS_KRB5I,
.service = RPC_GSS_SVC_INTEGRITY,
.name = "krb5i",
},
[2] = {
.pseudoflavor = RPC_AUTH_GSS_KRB5P,
.service = RPC_GSS_SVC_PRIVACY,
.name = "krb5p",
},
};
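/*
 * The pseudoflavors map onto GSS service levels: plain krb5 provides
 * authentication only, krb5i adds per-message integrity protection, and
 * krb5p adds privacy (encryption) as well.
 */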
static struct gss_api_mech gss_kerberos_mech = {
.gm_name = "krb5",
.gm_owner = THIS_MODULE,
.gm_oid = {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"},
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
.gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
};
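/*
 * The upcall enctype list above corresponds, per the standard Kerberos
 * enctype numbering, to aes256-cts-hmac-sha1-96 (18), aes128-cts-hmac-sha1-96
 * (17), des3-cbc-sha1 (16), arcfour-hmac (23) and the single-DES variants
 * des-cbc-md5 (3), des-cbc-crc (1) and des-cbc-md4 (2).
 */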
static int __init init_kerberos_module(void)
{
int status;
status = gss_mech_register(&gss_kerberos_mech);
if (status)
printk("Failed to register kerberos gss mechanism!\n");
return status;
}
static void __exit cleanup_kerberos_module(void)
{
gss_mech_unregister(&gss_kerberos_mech);
}
MODULE_LICENSE("GPL");
module_init(init_kerberos_module);
module_exit(cleanup_kerberos_module);
| gpl-2.0 |
arpith20/ubuntu-vivid | drivers/leds/leds-lm3530.c | 1020 | 12991 | /*
* Copyright (C) 2011 ST-Ericsson SA.
* Copyright (C) 2009 Motorola, Inc.
*
* License Terms: GNU General Public License v2
*
* Simple driver for National Semiconductor LM3530 Backlight driver chip
*
* Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
* based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
*/
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/led-lm3530.h>
#include <linux/types.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#define LM3530_LED_DEV "lcd-backlight"
#define LM3530_NAME "lm3530-led"
#define LM3530_GEN_CONFIG 0x10
#define LM3530_ALS_CONFIG 0x20
#define LM3530_BRT_RAMP_RATE 0x30
#define LM3530_ALS_IMP_SELECT 0x41
#define LM3530_BRT_CTRL_REG 0xA0
#define LM3530_ALS_ZB0_REG 0x60
#define LM3530_ALS_ZB1_REG 0x61
#define LM3530_ALS_ZB2_REG 0x62
#define LM3530_ALS_ZB3_REG 0x63
#define LM3530_ALS_Z0T_REG 0x70
#define LM3530_ALS_Z1T_REG 0x71
#define LM3530_ALS_Z2T_REG 0x72
#define LM3530_ALS_Z3T_REG 0x73
#define LM3530_ALS_Z4T_REG 0x74
#define LM3530_REG_MAX 14
/* General Control Register */
#define LM3530_EN_I2C_SHIFT (0)
#define LM3530_RAMP_LAW_SHIFT (1)
#define LM3530_MAX_CURR_SHIFT (2)
#define LM3530_EN_PWM_SHIFT (5)
#define LM3530_PWM_POL_SHIFT (6)
#define LM3530_EN_PWM_SIMPLE_SHIFT (7)
#define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT)
#define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT)
#define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT)
#define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT)
/* ALS Config Register Options */
#define LM3530_ALS_AVG_TIME_SHIFT (0)
#define LM3530_EN_ALS_SHIFT (3)
#define LM3530_ALS_SEL_SHIFT (5)
#define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT)
/* Brightness Ramp Rate Register */
#define LM3530_BRT_RAMP_FALL_SHIFT (0)
#define LM3530_BRT_RAMP_RISE_SHIFT (3)
/* ALS Resistor Select */
#define LM3530_ALS1_IMP_SHIFT (0)
#define LM3530_ALS2_IMP_SHIFT (4)
/* Zone Boundary Register defaults */
#define LM3530_ALS_ZB_MAX (4)
#define LM3530_ALS_WINDOW_mV (1000)
#define LM3530_ALS_OFFSET_mV (4)
/* Zone Target Register defaults */
#define LM3530_DEF_ZT_0 (0x7F)
#define LM3530_DEF_ZT_1 (0x66)
#define LM3530_DEF_ZT_2 (0x4C)
#define LM3530_DEF_ZT_3 (0x33)
#define LM3530_DEF_ZT_4 (0x19)
/* 7 bits are used for the brightness : LM3530_BRT_CTRL_REG */
#define MAX_BRIGHTNESS (127)
struct lm3530_mode_map {
const char *mode;
enum lm3530_mode mode_val;
};
static struct lm3530_mode_map mode_map[] = {
{ "man", LM3530_BL_MODE_MANUAL },
{ "als", LM3530_BL_MODE_ALS },
{ "pwm", LM3530_BL_MODE_PWM },
};
/**
* struct lm3530_data
* @led_dev: led class device
* @client: i2c client
* @pdata: LM3530 platform data
* @mode: mode of operation - manual, ALS, PWM
* @regulator: regulator
* @brightness: previous brightness value
* @enable: regulator is enabled
*/
struct lm3530_data {
struct led_classdev led_dev;
struct i2c_client *client;
struct lm3530_platform_data *pdata;
enum lm3530_mode mode;
struct regulator *regulator;
enum led_brightness brightness;
bool enable;
};
/*
* struct lm3530_als_data
* @config : value of ALS configuration register
* @imp_sel : value of ALS resistor select register
* @zone : values of ALS ZB(Zone Boundary) registers
*/
struct lm3530_als_data {
u8 config;
u8 imp_sel;
u8 zones[LM3530_ALS_ZB_MAX];
};
static const u8 lm3530_reg[LM3530_REG_MAX] = {
LM3530_GEN_CONFIG,
LM3530_ALS_CONFIG,
LM3530_BRT_RAMP_RATE,
LM3530_ALS_IMP_SELECT,
LM3530_BRT_CTRL_REG,
LM3530_ALS_ZB0_REG,
LM3530_ALS_ZB1_REG,
LM3530_ALS_ZB2_REG,
LM3530_ALS_ZB3_REG,
LM3530_ALS_Z0T_REG,
LM3530_ALS_Z1T_REG,
LM3530_ALS_Z2T_REG,
LM3530_ALS_Z3T_REG,
LM3530_ALS_Z4T_REG,
};
static int lm3530_get_mode_from_str(const char *str)
{
int i;
for (i = 0; i < ARRAY_SIZE(mode_map); i++)
if (sysfs_streq(str, mode_map[i].mode))
return mode_map[i].mode_val;
return -EINVAL;
}
static void lm3530_als_configure(struct lm3530_platform_data *pdata,
struct lm3530_als_data *als)
{
int i;
u32 als_vmin, als_vmax, als_vstep;
if (pdata->als_vmax == 0) {
pdata->als_vmin = 0;
pdata->als_vmax = LM3530_ALS_WINDOW_mV;
}
als_vmin = pdata->als_vmin;
als_vmax = pdata->als_vmax;
if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
/* n zone boundaries make n+1 zones */
als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
als->config =
(pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
(LM3530_ENABLE_ALS) |
(pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
als->imp_sel =
(pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
(pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
}
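/*
 * Worked example (illustrative only, assuming LED_FULL is 255): with the
 * default window of als_vmin = 0 and als_vmax = 1000 mV, als_vstep is
 * 1000 / (4 + 1) = 200 mV, so the four zone boundaries scale to
 *
 *	zones[0] = (4 + 200 + 0*200) * 255 / 1000 =  52
 *	zones[1] = (4 + 200 + 1*200) * 255 / 1000 = 103
 *	zones[2] = (4 + 200 + 2*200) * 255 / 1000 = 154
 *	zones[3] = (4 + 200 + 3*200) * 255 / 1000 = 205
 *
 * i.e. roughly evenly spaced register values across the ALS input window.
 */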
static int lm3530_led_enable(struct lm3530_data *drvdata)
{
int ret;
if (drvdata->enable)
return 0;
ret = regulator_enable(drvdata->regulator);
if (ret) {
dev_err(drvdata->led_dev.dev, "Failed to enable vin:%d\n", ret);
return ret;
}
drvdata->enable = true;
return 0;
}
static void lm3530_led_disable(struct lm3530_data *drvdata)
{
int ret;
if (!drvdata->enable)
return;
ret = regulator_disable(drvdata->regulator);
if (ret) {
dev_err(drvdata->led_dev.dev, "Failed to disable vin:%d\n",
ret);
return;
}
drvdata->enable = false;
}
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
u8 brt_ramp;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
struct lm3530_als_data als;
memset(&als, 0, sizeof(struct lm3530_als_data));
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
gen_config |= LM3530_ENABLE_I2C;
break;
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
lm3530_als_configure(pdata, &als);
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
(pdata->pwm_pol_hi << LM3530_PWM_POL_SHIFT);
break;
}
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
if (drvdata->brightness)
brightness = drvdata->brightness;
else
brightness = drvdata->brightness = pdata->brt_val;
if (brightness > drvdata->led_dev.max_brightness)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
reg_val[12] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
reg_val[13] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
ret = lm3530_led_enable(drvdata);
if (ret)
return ret;
for (i = 0; i < LM3530_REG_MAX; i++) {
/* do not update the brightness register in PWM mode */
if (lm3530_reg[i] == LM3530_BRT_CTRL_REG &&
drvdata->mode == LM3530_BL_MODE_PWM) {
if (pwm->pwm_set_intensity)
pwm->pwm_set_intensity(reg_val[i],
drvdata->led_dev.max_brightness);
continue;
}
ret = i2c_smbus_write_byte_data(client,
lm3530_reg[i], reg_val[i]);
if (ret)
break;
}
return ret;
}
static void lm3530_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brt_val)
{
int err;
struct lm3530_data *drvdata =
container_of(led_cdev, struct lm3530_data, led_dev);
struct lm3530_platform_data *pdata = drvdata->pdata;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
u8 max_brightness = led_cdev->max_brightness;
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
if (!drvdata->enable) {
err = lm3530_init_registers(drvdata);
if (err) {
dev_err(&drvdata->client->dev,
"Register Init failed: %d\n", err);
break;
}
}
/* set the brightness in brightness control register*/
err = i2c_smbus_write_byte_data(drvdata->client,
LM3530_BRT_CTRL_REG, brt_val);
if (err)
dev_err(&drvdata->client->dev,
"Unable to set brightness: %d\n", err);
else
drvdata->brightness = brt_val;
if (brt_val == 0)
lm3530_led_disable(drvdata);
break;
case LM3530_BL_MODE_ALS:
break;
case LM3530_BL_MODE_PWM:
if (pwm->pwm_set_intensity)
pwm->pwm_set_intensity(brt_val, max_brightness);
break;
default:
break;
}
}
static ssize_t lm3530_mode_get(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm3530_data *drvdata;
int i, len = 0;
drvdata = container_of(led_cdev, struct lm3530_data, led_dev);
for (i = 0; i < ARRAY_SIZE(mode_map); i++)
if (drvdata->mode == mode_map[i].mode_val)
len += sprintf(buf + len, "[%s] ", mode_map[i].mode);
else
len += sprintf(buf + len, "%s ", mode_map[i].mode);
len += sprintf(buf + len, "\n");
return len;
}
static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm3530_data *drvdata;
struct lm3530_pwm_data *pwm;
u8 max_brightness;
int mode, err;
drvdata = container_of(led_cdev, struct lm3530_data, led_dev);
pwm = &drvdata->pdata->pwm_data;
max_brightness = led_cdev->max_brightness;
mode = lm3530_get_mode_from_str(buf);
if (mode < 0) {
dev_err(dev, "Invalid mode\n");
return mode;
}
drvdata->mode = mode;
/* set pwm to low if unnecessary */
if (mode != LM3530_BL_MODE_PWM && pwm->pwm_set_intensity)
pwm->pwm_set_intensity(0, max_brightness);
err = lm3530_init_registers(drvdata);
if (err) {
dev_err(dev, "Setting %s Mode failed :%d\n", buf, err);
return err;
}
return sizeof(drvdata->mode);
}
static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
static struct attribute *lm3530_attrs[] = {
&dev_attr_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(lm3530);
static int lm3530_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm3530_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm3530_data *drvdata;
int err = 0;
if (pdata == NULL) {
dev_err(&client->dev, "platform data required\n");
return -ENODEV;
}
/* BL mode */
if (pdata->mode > LM3530_BL_MODE_PWM) {
dev_err(&client->dev, "Illegal Mode request\n");
return -EINVAL;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
return -EIO;
}
drvdata = devm_kzalloc(&client->dev, sizeof(struct lm3530_data),
GFP_KERNEL);
if (drvdata == NULL)
return -ENOMEM;
drvdata->mode = pdata->mode;
drvdata->client = client;
drvdata->pdata = pdata;
drvdata->brightness = LED_OFF;
drvdata->enable = false;
drvdata->led_dev.name = LM3530_LED_DEV;
drvdata->led_dev.brightness_set = lm3530_brightness_set;
drvdata->led_dev.max_brightness = MAX_BRIGHTNESS;
drvdata->led_dev.groups = lm3530_groups;
i2c_set_clientdata(client, drvdata);
drvdata->regulator = devm_regulator_get(&client->dev, "vin");
if (IS_ERR(drvdata->regulator)) {
dev_err(&client->dev, "regulator get failed\n");
err = PTR_ERR(drvdata->regulator);
drvdata->regulator = NULL;
return err;
}
if (drvdata->pdata->brt_val) {
err = lm3530_init_registers(drvdata);
if (err < 0) {
dev_err(&client->dev,
"Register Init failed: %d\n", err);
return err;
}
}
err = led_classdev_register(&client->dev, &drvdata->led_dev);
if (err < 0) {
dev_err(&client->dev, "Register led class failed: %d\n", err);
return err;
}
return 0;
}
static int lm3530_remove(struct i2c_client *client)
{
struct lm3530_data *drvdata = i2c_get_clientdata(client);
lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
return 0;
}
static const struct i2c_device_id lm3530_id[] = {
{LM3530_NAME, 0},
{}
};
MODULE_DEVICE_TABLE(i2c, lm3530_id);
static struct i2c_driver lm3530_i2c_driver = {
.probe = lm3530_probe,
.remove = lm3530_remove,
.id_table = lm3530_id,
.driver = {
.name = LM3530_NAME,
.owner = THIS_MODULE,
},
};
module_i2c_driver(lm3530_i2c_driver);
MODULE_DESCRIPTION("Back Light driver for LM3530");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
| gpl-2.0 |
adbaby/android_kernel_msm8974 | fs/ext4/page-io.c | 3580 | 11535 | /*
* linux/fs/ext4/page-io.c
*
* This contains the new page_io functions for ext4
*
* Written by Theodore Ts'o, 2010.
*/
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"
static struct kmem_cache *io_page_cachep, *io_end_cachep;
int __init ext4_init_pageio(void)
{
io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
if (io_page_cachep == NULL)
return -ENOMEM;
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
if (io_end_cachep == NULL) {
kmem_cache_destroy(io_page_cachep);
return -ENOMEM;
}
return 0;
}
void ext4_exit_pageio(void)
{
kmem_cache_destroy(io_end_cachep);
kmem_cache_destroy(io_page_cachep);
}
void ext4_ioend_wait(struct inode *inode)
{
wait_queue_head_t *wq = ext4_ioend_wq(inode);
wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
}
static void put_io_page(struct ext4_io_page *io_page)
{
if (atomic_dec_and_test(&io_page->p_count)) {
end_page_writeback(io_page->p_page);
put_page(io_page->p_page);
kmem_cache_free(io_page_cachep, io_page);
}
}
void ext4_free_io_end(ext4_io_end_t *io)
{
int i;
BUG_ON(!io);
if (io->page)
put_page(io->page);
for (i = 0; i < io->num_io_pages; i++)
put_io_page(io->pages[i]);
io->num_io_pages = 0;
if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
wake_up_all(ext4_ioend_wq(io->inode));
kmem_cache_free(io_end_cachep, io);
}
/*
* check a range of space and convert unwritten extents to written.
*
* Called with inode->i_mutex; we depend on this when we manipulate
* io->flag, since we could otherwise race with ext4_flush_completed_IO()
*/
int ext4_end_io_nolock(ext4_io_end_t *io)
{
struct inode *inode = io->inode;
loff_t offset = io->offset;
ssize_t size = io->size;
int ret = 0;
ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
io, inode->i_ino, io->list.next, io->list.prev);
ret = ext4_convert_unwritten_extents(inode, offset, size);
if (ret < 0) {
ext4_msg(inode->i_sb, KERN_EMERG,
"failed to convert unwritten extents to written "
"extents -- potential data loss! "
"(inode %lu, offset %llu, size %zd, error %d)",
inode->i_ino, offset, size, ret);
}
if (io->iocb)
aio_complete(io->iocb, io->result, 0);
if (io->flag & EXT4_IO_END_DIRECT)
inode_dio_done(inode);
/* Wake up anyone waiting on unwritten extent conversion */
if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
wake_up_all(ext4_ioend_wq(io->inode));
return ret;
}
/*
* work on completed aio dio IO, to convert unwritten extents to extents
*/
static void ext4_end_io_work(struct work_struct *work)
{
ext4_io_end_t *io = container_of(work, ext4_io_end_t, work);
struct inode *inode = io->inode;
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long flags;
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (io->flag & EXT4_IO_END_IN_FSYNC)
goto requeue;
if (list_empty(&io->list)) {
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
goto free;
}
if (!mutex_trylock(&inode->i_mutex)) {
bool was_queued;
requeue:
was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
io->flag |= EXT4_IO_END_QUEUED;
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/*
* Requeue the work instead of waiting so that the work
* items queued after this can be processed.
*/
queue_work(EXT4_SB(inode->i_sb)->dio_unwritten_wq, &io->work);
/*
* To prevent the ext4-dio-unwritten thread from keeping
* requeueing end_io requests and occupying cpu for too long,
* yield the cpu if it sees an end_io request that has already
* been requeued.
*/
if (was_queued)
yield();
return;
}
list_del_init(&io->list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
(void) ext4_end_io_nolock(io);
mutex_unlock(&inode->i_mutex);
free:
ext4_free_io_end(io);
}
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
if (io) {
atomic_inc(&EXT4_I(inode)->i_ioend_count);
io->inode = inode;
INIT_WORK(&io->work, ext4_end_io_work);
INIT_LIST_HEAD(&io->list);
}
return io;
}
/*
* Print a buffer I/O error compatible with the one in fs/buffer.c. This
* provides compatibility with dmesg scrapers that look for a specific
* buffer I/O error message. We really need a unified error reporting
* structure to userspace ala Digital Unix's uerf system, but it's
* probably not going to happen in my lifetime, due to LKML politics...
*/
static void buffer_io_error(struct buffer_head *bh)
{
char b[BDEVNAME_SIZE];
printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
bdevname(bh->b_bdev, b),
(unsigned long long)bh->b_blocknr);
}
static void ext4_end_bio(struct bio *bio, int error)
{
ext4_io_end_t *io_end = bio->bi_private;
struct workqueue_struct *wq;
struct inode *inode;
unsigned long flags;
int i;
sector_t bi_sector = bio->bi_sector;
BUG_ON(!io_end);
bio->bi_private = NULL;
bio->bi_end_io = NULL;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
error = 0;
bio_put(bio);
for (i = 0; i < io_end->num_io_pages; i++) {
struct page *page = io_end->pages[i]->p_page;
struct buffer_head *bh, *head;
loff_t offset;
loff_t io_end_offset;
if (error) {
SetPageError(page);
set_bit(AS_EIO, &page->mapping->flags);
head = page_buffers(page);
BUG_ON(!head);
io_end_offset = io_end->offset + io_end->size;
offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
bh = head;
do {
if ((offset >= io_end->offset) &&
(offset+bh->b_size <= io_end_offset))
buffer_io_error(bh);
offset += bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
}
put_io_page(io_end->pages[i]);
}
io_end->num_io_pages = 0;
inode = io_end->inode;
if (error) {
io_end->flag |= EXT4_IO_END_ERROR;
ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
"(offset %llu size %ld starting block %llu)",
inode->i_ino,
(unsigned long long) io_end->offset,
(long) io_end->size,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
}
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
return;
}
/* Add the io_end to per-inode completed io list*/
spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
/* queue the work to convert unwritten extents to written */
queue_work(wq, &io_end->work);
}
void ext4_io_submit(struct ext4_io_submit *io)
{
struct bio *bio = io->io_bio;
if (bio) {
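/*
 * Take an extra reference across submit_bio() so that the
 * BIO_EOPNOTSUPP check below remains safe even if the bio has
 * already completed by the time submit_bio() returns.
 */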
bio_get(io->io_bio);
submit_bio(io->io_op, io->io_bio);
BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
bio_put(io->io_bio);
}
io->io_bio = NULL;
io->io_op = 0;
io->io_end = NULL;
}
static int io_submit_init(struct ext4_io_submit *io,
struct inode *inode,
struct writeback_control *wbc,
struct buffer_head *bh)
{
ext4_io_end_t *io_end;
struct page *page = bh->b_page;
int nvecs = bio_get_nr_vecs(bh->b_bdev);
struct bio *bio;
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end)
return -ENOMEM;
bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_private = io->io_end = io_end;
bio->bi_end_io = ext4_end_bio;
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio;
io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
io->io_next_block = bh->b_blocknr;
return 0;
}
static int io_submit_add_bh(struct ext4_io_submit *io,
struct ext4_io_page *io_page,
struct inode *inode,
struct writeback_control *wbc,
struct buffer_head *bh)
{
ext4_io_end_t *io_end;
int ret;
if (buffer_new(bh)) {
clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
}
if (!buffer_mapped(bh) || buffer_delay(bh)) {
if (!buffer_mapped(bh))
clear_buffer_dirty(bh);
if (io->io_bio)
ext4_io_submit(io);
return 0;
}
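/*
 * If this buffer is not contiguous with the bio being built, submit what we
 * have so far and start over with a fresh bio/io_end.  The submit_and_retry
 * label is also entered via goto from further down when the io_end's page
 * array is full or bio_add_page() cannot take the whole buffer.
 */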
if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
ret = io_submit_init(io, inode, wbc, bh);
if (ret)
return ret;
}
io_end = io->io_end;
if ((io_end->num_io_pages >= MAX_IO_PAGES) &&
(io_end->pages[io_end->num_io_pages-1] != io_page))
goto submit_and_retry;
if (buffer_uninit(bh))
ext4_set_io_unwritten_flag(inode, io_end);
io->io_end->size += bh->b_size;
io->io_next_block++;
ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
if ((io_end->num_io_pages == 0) ||
(io_end->pages[io_end->num_io_pages-1] != io_page)) {
io_end->pages[io_end->num_io_pages++] = io_page;
atomic_inc(&io_page->p_count);
}
return 0;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
struct page *page,
int len,
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
unsigned block_start, block_end, blocksize;
struct ext4_io_page *io_page;
struct buffer_head *bh, *head;
int ret = 0;
blocksize = 1 << inode->i_blkbits;
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
if (!io_page) {
set_page_dirty(page);
unlock_page(page);
return -ENOMEM;
}
io_page->p_page = page;
atomic_set(&io_page->p_count, 1);
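/*
 * This initial reference is owned by ext4_bio_write_page() itself and is
 * dropped by the put_io_page() at the bottom of this function;
 * io_submit_add_bh() takes an extra reference for each bio that covers part
 * of this page, so end_page_writeback() only runs once the last reference
 * goes away in put_io_page().
 */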
get_page(page);
set_page_writeback(page);
ClearPageError(page);
for (bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_start >= len) {
/*
* Comments copied from block_write_full_page_endio:
*
* The page straddles i_size. It must be zeroed out on
* each and every writepage invocation because it may
* be mmapped. "A file is mapped in multiples of the
* page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when
* mapped, and writes to that region are not written
* out to the file."
*/
zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
}
clear_buffer_dirty(bh);
ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
* we can do but mark the page as dirty, and
* better luck next time.
*/
set_page_dirty(page);
break;
}
}
unlock_page(page);
/*
* If the page was truncated before we could do the writeback,
* or we had a memory allocation error while trying to write
* the first buffer head, we won't have submitted any pages for
* I/O. In that case we need to make sure we've cleared the
* PageWriteback bit from the page to prevent the system from
* wedging later on.
*/
put_io_page(io_page);
return ret;
}
| gpl-2.0 |
lplachno/mx6-dev | arch/mips/pci/fixup-ip32.c | 4348 | 1484 | #include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/ip32/ip32_ints.h>
/*
* O2 has up to 5 PCI devices connected into the MACE bridge. The device
* map looks like this:
*
* 0 aic7xxx 0
* 1 aic7xxx 1
* 2 expansion slot
* 3 N/C
* 4 N/C
*/
#define SCSI0 MACEPCI_SCSI0_IRQ
#define SCSI1 MACEPCI_SCSI1_IRQ
#define INTA0 MACEPCI_SLOT0_IRQ
#define INTA1 MACEPCI_SLOT1_IRQ
#define INTA2 MACEPCI_SLOT2_IRQ
#define INTB MACEPCI_SHARED0_IRQ
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
static char irq_tab_mace[][5] __initdata = {
/* Dummy INT#A INT#B INT#C INT#D */
{0, 0, 0, 0, 0}, /* This is placeholder row - never used */
{0, SCSI0, SCSI0, SCSI0, SCSI0},
{0, SCSI1, SCSI1, SCSI1, SCSI1},
{0, INTA0, INTB, INTC, INTD},
{0, INTA1, INTC, INTD, INTB},
{0, INTA2, INTD, INTB, INTC},
};
/*
* Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of
* the device (1-4 => A-D), tell what irq to use. Note that we don't
* in theory have slots 4 and 5, and we never normally use the shared
* irqs. I suppose a device without a pin A will thank us for doing it
* right if there exists such a broken piece of crap.
*/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return irq_tab_mace[slot][pin];
}
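/*
 * Worked lookup example: a device at PCI_SLOT() == 3 asserting INT#A
 * (pin == 1) gets irq_tab_mace[3][1] == INTA0 == MACEPCI_SLOT0_IRQ, while
 * pin B on the same slot maps to the shared INTB line.  Slot 0 only hits
 * the placeholder row and is never used.
 */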
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
| gpl-2.0 |
AICP/kernel_htc_m7 | net/rxrpc/ar-recvmsg.c | 4604 | 11038 | /* RxRPC recvmsg() implementation
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* remove a call's user ID from the socket tree so that the user ID becomes
* available again and won't be seen again in association with that call
*/
void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
_debug("RELEASE CALL %d", call->debug_id);
if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
write_lock_bh(&rx->call_lock);
rb_erase(&call->sock_node, &call->socket->calls);
clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
write_unlock_bh(&rx->call_lock);
}
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
/*
* receive a message from an RxRPC socket
* - we need to be careful about two or more threads calling recvmsg
* simultaneously
*/
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_call *call = NULL, *continue_call = NULL;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
struct sk_buff *skb;
long timeo;
int copy, ret, ullen, offset, copied = 0;
u32 abort_code;
DEFINE_WAIT(wait);
_enter(",,,%zu,%d", len, flags);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
msg->msg_flags |= MSG_MORE;
lock_sock(&rx->sk);
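/*
 * continue_call tracks the call whose data stream we are part-way through
 * returning, so that successive DATA packets for the same call can be
 * accumulated into a single recvmsg() result across iterations of this loop.
 */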
for (;;) {
/* return immediately if a client socket has no outstanding
* calls */
if (RB_EMPTY_ROOT(&rx->calls)) {
if (copied)
goto out;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
return -ENODATA;
}
}
/* get the next message on the Rx queue */
skb = skb_peek(&rx->sk.sk_receive_queue);
if (!skb) {
/* nothing remains on the queue */
if (copied &&
(msg->msg_flags & MSG_PEEK || timeo == 0))
goto out;
/* wait for a message to turn up */
release_sock(&rx->sk);
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
TASK_INTERRUPTIBLE);
ret = sock_error(&rx->sk);
if (ret)
goto wait_error;
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
if (signal_pending(current))
goto wait_interrupted;
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
lock_sock(&rx->sk);
continue;
}
peek_next_packet:
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
/* make sure we wait for the state to be updated in this call */
spin_lock_bh(&call->lock);
spin_unlock_bh(&call->lock);
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("packet from released call");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
continue;
}
/* determine whether to continue last data receive */
if (continue_call) {
_debug("maybe cont");
if (call != continue_call ||
skb->mark != RXRPC_SKB_MARK_DATA) {
release_sock(&rx->sk);
rxrpc_put_call(continue_call);
_leave(" = %d [noncont]", copied);
return copied;
}
}
rxrpc_get_call(call);
/* copy the peer address and timestamp */
if (!continue_call) {
if (msg->msg_name && msg->msg_namelen > 0)
memcpy(msg->msg_name,
&call->conn->trans->peer->srx,
sizeof(call->conn->trans->peer->srx));
sock_recv_ts_and_drops(msg, &rx->sk, skb);
}
/* receive the message */
if (skb->mark != RXRPC_SKB_MARK_DATA)
goto receive_non_data_message;
_debug("recvmsg DATA #%u { %d, %d }",
ntohl(sp->hdr.seq), skb->len, sp->offset);
if (!continue_call) {
/* only set the control data once per recvmsg() */
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
}
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
call->rx_data_recv = ntohl(sp->hdr.seq);
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
offset = sp->offset;
copy = skb->len - offset;
if (copy > len - copied)
copy = len - copied;
if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
ret = skb_copy_datagram_iovec(skb, offset,
msg->msg_iov, copy);
} else {
ret = skb_copy_and_csum_datagram_iovec(skb, offset,
msg->msg_iov);
if (ret == -EINVAL)
goto csum_copy_error;
}
if (ret < 0)
goto copy_error;
/* handle piecemeal consumption of data packets */
_debug("copied %d+%d", copy, copied);
offset += copy;
copied += copy;
if (!(flags & MSG_PEEK))
sp->offset = offset;
if (sp->offset < skb->len) {
_debug("buffer full");
ASSERTCMP(copied, ==, len);
break;
}
/* we transferred the whole data packet */
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
_debug("last");
if (call->conn->out_clientflag) {
/* last byte of reply received */
ret = copied;
goto terminal_message;
}
/* last bit of request received */
if (!(flags & MSG_PEEK)) {
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) !=
skb)
BUG();
rxrpc_free_skb(skb);
}
msg->msg_flags &= ~MSG_MORE;
break;
}
/* move on to the next data message */
_debug("next");
if (!continue_call)
continue_call = sp->call;
else
rxrpc_put_call(call);
call = NULL;
if (flags & MSG_PEEK) {
_debug("peek next");
skb = skb->next;
if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
break;
goto peek_next_packet;
}
_debug("eat packet");
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
/* end of non-terminal data packet reception for the moment */
_debug("end rcv data");
out:
release_sock(&rx->sk);
if (call)
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d [data]", copied);
return copied;
/* handle non-DATA messages such as aborts, incoming connections and
* final ACKs */
receive_non_data_message:
_debug("non-data");
if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
_debug("RECV NEW CALL");
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
if (ret < 0)
goto copy_error;
if (!(flags & MSG_PEEK)) {
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
}
goto out;
}
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
ullen, &call->user_call_ID);
if (ret < 0)
goto copy_error;
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
switch (skb->mark) {
case RXRPC_SKB_MARK_DATA:
BUG();
case RXRPC_SKB_MARK_FINAL_ACK:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
break;
case RXRPC_SKB_MARK_BUSY:
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
break;
case RXRPC_SKB_MARK_REMOTE_ABORT:
abort_code = call->abort_code;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
break;
case RXRPC_SKB_MARK_NET_ERROR:
_debug("RECV NET ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
break;
case RXRPC_SKB_MARK_LOCAL_ERROR:
_debug("RECV LOCAL ERROR %d", sp->error);
abort_code = sp->error;
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
&abort_code);
break;
default:
BUG();
break;
}
if (ret < 0)
goto copy_error;
terminal_message:
_debug("terminal");
msg->msg_flags &= ~MSG_MORE;
msg->msg_flags |= MSG_EOR;
if (!(flags & MSG_PEEK)) {
_net("free terminal skb %p", skb);
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
BUG();
rxrpc_free_skb(skb);
rxrpc_remove_user_ID(rx, call);
}
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
copy_error:
_debug("copy error");
release_sock(&rx->sk);
rxrpc_put_call(call);
if (continue_call)
rxrpc_put_call(continue_call);
_leave(" = %d", ret);
return ret;
csum_copy_error:
_debug("csum error");
release_sock(&rx->sk);
if (continue_call)
rxrpc_put_call(continue_call);
rxrpc_kill_skb(skb);
skb_kill_datagram(&rx->sk, skb, flags);
rxrpc_put_call(call);
return -EAGAIN;
wait_interrupted:
ret = sock_intr_errno(timeo);
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
if (continue_call)
rxrpc_put_call(continue_call);
if (copied)
copied = ret;
_leave(" = %d [waitfail %d]", copied, ret);
return copied;
}
/**
* rxrpc_kernel_data_delivered - Record delivery of data message
* @skb: Message holding data
*
* Record the delivery of a data message. This permits RxRPC to keep its
* tracking correct. The socket buffer will be deleted.
*/
void rxrpc_kernel_data_delivered(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_call *call = sp->call;
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
call->rx_data_recv = ntohl(sp->hdr.seq);
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
/**
* rxrpc_kernel_is_data_last - Determine if data message is last one
* @skb: Message holding data
*
* Determine if data message is last one for the parent call.
*/
bool rxrpc_kernel_is_data_last(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA);
return sp->hdr.flags & RXRPC_LAST_PACKET;
}
EXPORT_SYMBOL(rxrpc_kernel_is_data_last);
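/*
 * A minimal usage sketch (not part of this file): an in-kernel user of the
 * af_rxrpc API that has been handed a data skb might check whether it is the
 * final packet of the call and then record its consumption.  The surrounding
 * call management is assumed and omitted here.
 *
 *	if (rxrpc_kernel_is_data_last(skb)) {
 *		// last data packet - the caller can now wind up the call
 *	}
 *	rxrpc_kernel_data_delivered(skb);	// updates rx_data_recv, frees skb
 */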
/**
* rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message
* @skb: Message indicating an abort
*
* Get the abort code from an RxRPC abort message.
*/
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
return sp->call->abort_code;
}
EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
/**
* rxrpc_kernel_get_error - Get the error number from an RxRPC error message
* @skb: Message indicating an error
*
* Get the error number from an RxRPC error message.
*/
int rxrpc_kernel_get_error_number(struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
return sp->error;
}
EXPORT_SYMBOL(rxrpc_kernel_get_error_number);
| gpl-2.0 |
adrynalyne/clean_kernel_htc-mecha | fs/reiserfs/lbalance.c | 4860 | 42396 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
#include <asm/uaccess.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
/* these are used in do_balance.c */
/* leaf_move_items
leaf_shift_left
leaf_shift_right
leaf_delete_items
leaf_insert_into_buf
leaf_paste_in_buffer
leaf_cut_from_buffer
leaf_paste_entries
*/
/* copy copy_count entries from source directory item to dest buffer (creating new item if needed) */
static void leaf_copy_dir_entries(struct buffer_info *dest_bi,
struct buffer_head *source, int last_first,
int item_num, int from, int copy_count)
{
struct buffer_head *dest = dest_bi->bi_bh;
int item_num_in_dest; /* either the number of target item,
or if we must create a new item,
the number of the item we will
create it next to */
struct item_head *ih;
struct reiserfs_de_head *deh;
int copy_records_len; /* length of all records in item to be copied */
char *records;
ih = B_N_PITEM_HEAD(source, item_num);
RFALSE(!is_direntry_le_ih(ih), "vs-10000: item must be directory item");
/* length of all record to be copied and first byte of the last of them */
deh = B_I_DEH(source, ih);
if (copy_count) {
copy_records_len = (from ? deh_location(&(deh[from - 1])) :
ih_item_len(ih)) -
deh_location(&(deh[from + copy_count - 1]));
records =
source->b_data + ih_location(ih) +
deh_location(&(deh[from + copy_count - 1]));
} else {
copy_records_len = 0;
records = NULL;
}
/* when copying last to first, the dest buffer can contain 0 items */
item_num_in_dest =
(last_first ==
LAST_TO_FIRST) ? ((B_NR_ITEMS(dest)) ? 0 : -1) : (B_NR_ITEMS(dest)
- 1);
/* if there are no items in dest or the first/last item in dest is not an item of the same directory */
if ((item_num_in_dest == -1) ||
(last_first == FIRST_TO_LAST && le_ih_k_offset(ih) == DOT_OFFSET) ||
(last_first == LAST_TO_FIRST
&& comp_short_le_keys /*COMP_SHORT_KEYS */ (&ih->ih_key,
B_N_PKEY(dest,
item_num_in_dest))))
{
/* create new item in dest */
struct item_head new_ih;
/* form item header */
memcpy(&new_ih.ih_key, &ih->ih_key, KEY_SIZE);
put_ih_version(&new_ih, KEY_FORMAT_3_5);
/* calculate item len */
put_ih_item_len(&new_ih,
DEH_SIZE * copy_count + copy_records_len);
put_ih_entry_count(&new_ih, 0);
if (last_first == LAST_TO_FIRST) {
/* form key by the following way */
if (from < I_ENTRY_COUNT(ih)) {
set_le_ih_k_offset(&new_ih,
deh_offset(&(deh[from])));
/*memcpy (&new_ih.ih_key.k_offset, &deh[from].deh_offset, SHORT_KEY_SIZE); */
} else {
/* no entries will be copied to this item in this function */
set_le_ih_k_offset(&new_ih, U32_MAX);
/* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we use the largest offset (U32_MAX, i.e. -1) */
}
set_le_key_k_type(KEY_FORMAT_3_5, &(new_ih.ih_key),
TYPE_DIRENTRY);
}
/* insert item into dest buffer */
leaf_insert_into_buf(dest_bi,
(last_first ==
LAST_TO_FIRST) ? 0 : B_NR_ITEMS(dest),
&new_ih, NULL, 0);
} else {
/* prepare space for entries */
leaf_paste_in_buffer(dest_bi,
(last_first ==
FIRST_TO_LAST) ? (B_NR_ITEMS(dest) -
1) : 0, MAX_US_INT,
DEH_SIZE * copy_count + copy_records_len,
records, 0);
}
item_num_in_dest =
(last_first == FIRST_TO_LAST) ? (B_NR_ITEMS(dest) - 1) : 0;
leaf_paste_entries(dest_bi, item_num_in_dest,
(last_first ==
FIRST_TO_LAST) ? I_ENTRY_COUNT(B_N_PITEM_HEAD(dest,
item_num_in_dest))
: 0, copy_count, deh + from, records,
DEH_SIZE * copy_count + copy_records_len);
}
/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
part of it or nothing (see the return 0 below) from SOURCE to the end
(if last_first) or beginning (!last_first) of the DEST */
/* returns 1 if anything was copied, else 0 */
static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
int bytes_or_entries)
{
struct buffer_head *dest = dest_bi->bi_bh;
int dest_nr_item, src_nr_item; /* number of items in the source and destination buffers */
struct item_head *ih;
struct item_head *dih;
dest_nr_item = B_NR_ITEMS(dest);
if (last_first == FIRST_TO_LAST) {
/* if ( DEST is empty or first item of SOURCE and last item of DEST are the items of different objects
or of different types ) then there is no need to treat this item differently from the other items
that we copy, so we return */
ih = B_N_PITEM_HEAD(src, 0);
dih = B_N_PITEM_HEAD(dest, dest_nr_item - 1);
if (!dest_nr_item
|| (!op_is_left_mergeable(&(ih->ih_key), src->b_size)))
/* there is nothing to merge */
return 0;
RFALSE(!ih_item_len(ih),
"vs-10010: item can not have empty length");
if (is_direntry_le_ih(ih)) {
if (bytes_or_entries == -1)
/* copy all entries to dest */
bytes_or_entries = ih_entry_count(ih);
leaf_copy_dir_entries(dest_bi, src, FIRST_TO_LAST, 0, 0,
bytes_or_entries);
return 1;
}
/* copy part of the body of the first item of SOURCE to the end of the body of the last item of the DEST
part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; don't create new item header
*/
if (bytes_or_entries == -1)
bytes_or_entries = ih_item_len(ih);
#ifdef CONFIG_REISERFS_CHECK
else {
if (bytes_or_entries == ih_item_len(ih)
&& is_indirect_le_ih(ih))
if (get_ih_free_space(ih))
reiserfs_panic(sb_from_bi(dest_bi),
"vs-10020",
"last unformatted node "
"must be filled "
"entirely (%h)", ih);
}
#endif
/* merge first item (or its part) of src buffer with the last
item of dest buffer. Both are of the same file */
leaf_paste_in_buffer(dest_bi,
dest_nr_item - 1, ih_item_len(dih),
bytes_or_entries, B_I_PITEM(src, ih), 0);
if (is_indirect_le_ih(dih)) {
RFALSE(get_ih_free_space(dih),
"vs-10030: merge to left: last unformatted node of non-last indirect item %h must have zerto free space",
ih);
if (bytes_or_entries == ih_item_len(ih))
set_ih_free_space(dih, get_ih_free_space(ih));
}
return 1;
}
/* copy boundary item to right (last_first == LAST_TO_FIRST) */
/* ( DEST is empty or last item of SOURCE and first item of DEST
are the items of different object or of different types )
*/
src_nr_item = B_NR_ITEMS(src);
ih = B_N_PITEM_HEAD(src, src_nr_item - 1);
dih = B_N_PITEM_HEAD(dest, 0);
if (!dest_nr_item || !op_is_left_mergeable(&(dih->ih_key), src->b_size))
return 0;
if (is_direntry_le_ih(ih)) {
if (bytes_or_entries == -1)
/* bytes_or_entries = number of entries in the last item body of SOURCE */
bytes_or_entries = ih_entry_count(ih);
leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
src_nr_item - 1,
ih_entry_count(ih) - bytes_or_entries,
bytes_or_entries);
return 1;
}
/* copy part of the body of the last item of SOURCE to the beginning of the body of the first item of the DEST;
   part defined by 'bytes_or_entries'; if bytes_or_entries == -1 copy whole body; change first item key of the DEST;
don't create new item header
*/
RFALSE(is_indirect_le_ih(ih) && get_ih_free_space(ih),
"vs-10040: merge to right: last unformatted node of non-last indirect item must be filled entirely (%h)",
ih);
if (bytes_or_entries == -1) {
/* bytes_or_entries = length of last item body of SOURCE */
bytes_or_entries = ih_item_len(ih);
RFALSE(le_ih_k_offset(dih) !=
le_ih_k_offset(ih) + op_bytes_number(ih, src->b_size),
"vs-10050: items %h and %h do not match", ih, dih);
/* change first item key of the DEST */
set_le_ih_k_offset(dih, le_ih_k_offset(ih));
/* item becomes non-mergeable */
/* or mergeable if left item was */
set_le_ih_k_type(dih, le_ih_k_type(ih));
} else {
/* merge to right only part of item */
RFALSE(ih_item_len(ih) <= bytes_or_entries,
"vs-10060: no so much bytes %lu (needed %lu)",
(unsigned long)ih_item_len(ih),
(unsigned long)bytes_or_entries);
/* change first item key of the DEST */
if (is_direct_le_ih(dih)) {
RFALSE(le_ih_k_offset(dih) <=
(unsigned long)bytes_or_entries,
"vs-10070: dih %h, bytes_or_entries(%d)", dih,
bytes_or_entries);
set_le_ih_k_offset(dih,
le_ih_k_offset(dih) -
bytes_or_entries);
} else {
RFALSE(le_ih_k_offset(dih) <=
(bytes_or_entries / UNFM_P_SIZE) * dest->b_size,
"vs-10080: dih %h, bytes_or_entries(%d)",
dih,
(bytes_or_entries / UNFM_P_SIZE) * dest->b_size);
set_le_ih_k_offset(dih,
le_ih_k_offset(dih) -
((bytes_or_entries / UNFM_P_SIZE) *
dest->b_size));
}
}
leaf_paste_in_buffer(dest_bi, 0, 0, bytes_or_entries,
B_I_PITEM(src,
ih) + ih_item_len(ih) - bytes_or_entries,
0);
return 1;
}
/* copy cpy_num items from buffer src to buffer dest
* last_first == FIRST_TO_LAST means, that we copy cpy_num items beginning from first-th item in src to tail of dest
* last_first == LAST_TO_FIRST means, that we copy cpy_num items beginning from first-th item in src to head of dest
*/
static void leaf_copy_items_entirely(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
int first, int cpy_num)
{
struct buffer_head *dest;
int nr, free_space;
int dest_before;
int last_loc, last_inserted_loc, location;
int i, j;
struct block_head *blkh;
struct item_head *ih;
RFALSE(last_first != LAST_TO_FIRST && last_first != FIRST_TO_LAST,
"vs-10090: bad last_first parameter %d", last_first);
RFALSE(B_NR_ITEMS(src) - first < cpy_num,
"vs-10100: too few items in source %d, required %d from %d",
B_NR_ITEMS(src), cpy_num, first);
RFALSE(cpy_num < 0, "vs-10110: can not copy negative amount of items");
RFALSE(!dest_bi, "vs-10120: can not copy negative amount of items");
dest = dest_bi->bi_bh;
RFALSE(!dest, "vs-10130: can not copy negative amount of items");
if (cpy_num == 0)
return;
blkh = B_BLK_HEAD(dest);
nr = blkh_nr_item(blkh);
free_space = blkh_free_space(blkh);
/* we will insert items before 0-th or nr-th item in dest buffer. It depends of last_first parameter */
dest_before = (last_first == LAST_TO_FIRST) ? 0 : nr;
/* location of head of first new item */
ih = B_N_PITEM_HEAD(dest, dest_before);
RFALSE(blkh_free_space(blkh) < cpy_num * IH_SIZE,
"vs-10140: not enough free space for headers %d (needed %d)",
B_FREE_SPACE(dest), cpy_num * IH_SIZE);
/* prepare space for headers */
memmove(ih + cpy_num, ih, (nr - dest_before) * IH_SIZE);
/* copy item headers */
memcpy(ih, B_N_PITEM_HEAD(src, first), cpy_num * IH_SIZE);
free_space -= (IH_SIZE * cpy_num);
set_blkh_free_space(blkh, free_space);
/* location of unmovable item */
j = location = (dest_before == 0) ? dest->b_size : ih_location(ih - 1);
for (i = dest_before; i < nr + cpy_num; i++) {
location -= ih_item_len(ih + i - dest_before);
put_ih_location(ih + i - dest_before, location);
}
/* prepare space for items */
last_loc = ih_location(&(ih[nr + cpy_num - 1 - dest_before]));
last_inserted_loc = ih_location(&(ih[cpy_num - 1]));
/* check free space */
RFALSE(free_space < j - last_inserted_loc,
"vs-10150: not enough free space for items %d (needed %d)",
free_space, j - last_inserted_loc);
memmove(dest->b_data + last_loc,
dest->b_data + last_loc + j - last_inserted_loc,
last_inserted_loc - last_loc);
/* copy items */
memcpy(dest->b_data + last_inserted_loc,
B_N_PITEM(src, (first + cpy_num - 1)), j - last_inserted_loc);
/* sizes, item number */
set_blkh_nr_item(blkh, nr + cpy_num);
set_blkh_free_space(blkh, free_space - (j - last_inserted_loc));
do_balance_mark_leaf_dirty(dest_bi->tb, dest, 0);
if (dest_bi->bi_parent) {
struct disk_child *t_dc;
t_dc = B_N_CHILD(dest_bi->bi_parent, dest_bi->bi_position);
RFALSE(dc_block_number(t_dc) != dest->b_blocknr,
"vs-10160: block number in bh does not match to field in disk_child structure %lu and %lu",
(long unsigned)dest->b_blocknr,
(long unsigned)dc_block_number(t_dc));
put_dc_size(t_dc,
dc_size(t_dc) + (j - last_inserted_loc +
IH_SIZE * cpy_num));
do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,
0);
}
}
/* This function splits the (liquid) item into two items (useful when
shifting part of an item into another node.) */
static void leaf_item_bottle(struct buffer_info *dest_bi,
struct buffer_head *src, int last_first,
int item_num, int cpy_bytes)
{
struct buffer_head *dest = dest_bi->bi_bh;
struct item_head *ih;
RFALSE(cpy_bytes == -1,
"vs-10170: bytes == - 1 means: do not split item");
if (last_first == FIRST_TO_LAST) {
/* if ( if item in position item_num in buffer SOURCE is directory item ) */
ih = B_N_PITEM_HEAD(src, item_num);
if (is_direntry_le_ih(ih))
leaf_copy_dir_entries(dest_bi, src, FIRST_TO_LAST,
item_num, 0, cpy_bytes);
else {
struct item_head n_ih;
/* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
part defined by 'cpy_bytes'; create new item header; change old item_header (????);
n_ih = new item_header;
*/
memcpy(&n_ih, ih, IH_SIZE);
put_ih_item_len(&n_ih, cpy_bytes);
if (is_indirect_le_ih(ih)) {
RFALSE(cpy_bytes == ih_item_len(ih)
&& get_ih_free_space(ih),
"vs-10180: when whole indirect item is bottle to left neighbor, it must have free_space==0 (not %lu)",
(long unsigned)get_ih_free_space(ih));
set_ih_free_space(&n_ih, 0);
}
RFALSE(op_is_left_mergeable(&(ih->ih_key), src->b_size),
"vs-10190: bad mergeability of item %h", ih);
n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
leaf_insert_into_buf(dest_bi, B_NR_ITEMS(dest), &n_ih,
B_N_PITEM(src, item_num), 0);
}
} else {
/* if ( if item in position item_num in buffer SOURCE is directory item ) */
ih = B_N_PITEM_HEAD(src, item_num);
if (is_direntry_le_ih(ih))
leaf_copy_dir_entries(dest_bi, src, LAST_TO_FIRST,
item_num,
I_ENTRY_COUNT(ih) - cpy_bytes,
cpy_bytes);
else {
struct item_head n_ih;
/* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
part defined by 'cpy_bytes'; create new item header;
n_ih = new item_header;
*/
memcpy(&n_ih, ih, SHORT_KEY_SIZE);
n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
if (is_direct_le_ih(ih)) {
set_le_ih_k_offset(&n_ih,
le_ih_k_offset(ih) +
ih_item_len(ih) - cpy_bytes);
set_le_ih_k_type(&n_ih, TYPE_DIRECT);
set_ih_free_space(&n_ih, MAX_US_INT);
} else {
/* indirect item */
RFALSE(!cpy_bytes && get_ih_free_space(ih),
"vs-10200: ih->ih_free_space must be 0 when indirect item will be appended");
set_le_ih_k_offset(&n_ih,
le_ih_k_offset(ih) +
(ih_item_len(ih) -
cpy_bytes) / UNFM_P_SIZE *
dest->b_size);
set_le_ih_k_type(&n_ih, TYPE_INDIRECT);
set_ih_free_space(&n_ih, get_ih_free_space(ih));
}
/* set item length */
put_ih_item_len(&n_ih, cpy_bytes);
n_ih.ih_version = ih->ih_version; /* JDM Endian safe, both le */
leaf_insert_into_buf(dest_bi, 0, &n_ih,
B_N_PITEM(src,
item_num) +
ih_item_len(ih) - cpy_bytes, 0);
}
}
}
/* If cpy_bytes equals minus one then copy cpy_num whole items from SOURCE to DEST.
   If cpy_bytes is not equal to minus one then copy cpy_num-1 whole items from SOURCE
   to DEST.  From the last item copy cpy_bytes bytes for a regular item and cpy_bytes
   directory entries for a directory item. */
static int leaf_copy_items(struct buffer_info *dest_bi, struct buffer_head *src,
int last_first, int cpy_num, int cpy_bytes)
{
struct buffer_head *dest;
int pos, i, src_nr_item, bytes;
dest = dest_bi->bi_bh;
RFALSE(!dest || !src, "vs-10210: !dest || !src");
RFALSE(last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST,
"vs-10220:last_first != FIRST_TO_LAST && last_first != LAST_TO_FIRST");
RFALSE(B_NR_ITEMS(src) < cpy_num,
"vs-10230: No enough items: %d, req. %d", B_NR_ITEMS(src),
cpy_num);
RFALSE(cpy_num < 0, "vs-10240: cpy_num < 0 (%d)", cpy_num);
if (cpy_num == 0)
return 0;
if (last_first == FIRST_TO_LAST) {
/* copy items to left */
pos = 0;
if (cpy_num == 1)
bytes = cpy_bytes;
else
bytes = -1;
/* copy the first item or its part or nothing to the end of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,0,bytes)) */
i = leaf_copy_boundary_item(dest_bi, src, FIRST_TO_LAST, bytes);
cpy_num -= i;
if (cpy_num == 0)
return i;
pos += i;
if (cpy_bytes == -1)
/* copy first cpy_num items starting from position 'pos' of SOURCE to end of DEST */
leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
pos, cpy_num);
else {
/* copy first cpy_num-1 items starting from position 'pos-1' of the SOURCE to the end of the DEST */
leaf_copy_items_entirely(dest_bi, src, FIRST_TO_LAST,
pos, cpy_num - 1);
/* copy part of the item which number is cpy_num+pos-1 to the end of the DEST */
leaf_item_bottle(dest_bi, src, FIRST_TO_LAST,
cpy_num + pos - 1, cpy_bytes);
}
} else {
/* copy items to right */
src_nr_item = B_NR_ITEMS(src);
if (cpy_num == 1)
bytes = cpy_bytes;
else
bytes = -1;
/* copy the last item or its part or nothing to the beginning of the DEST (i = leaf_copy_boundary_item(DEST,SOURCE,1,bytes)); */
i = leaf_copy_boundary_item(dest_bi, src, LAST_TO_FIRST, bytes);
cpy_num -= i;
if (cpy_num == 0)
return i;
pos = src_nr_item - cpy_num - i;
if (cpy_bytes == -1) {
/* starting from position 'pos' copy last cpy_num items of SOURCE to begin of DEST */
leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
pos, cpy_num);
} else {
/* copy last cpy_num-1 items starting from position 'pos+1' of the SOURCE to the begin of the DEST; */
leaf_copy_items_entirely(dest_bi, src, LAST_TO_FIRST,
pos + 1, cpy_num - 1);
/* copy part of the item which number is pos to the begin of the DEST */
leaf_item_bottle(dest_bi, src, LAST_TO_FIRST, pos,
cpy_bytes);
}
}
return i;
}
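/*
 * Illustrative example (ignoring any boundary-item merge done first by
 * leaf_copy_boundary_item()): leaf_copy_items() with cpy_num == 3 and
 * cpy_bytes == -1 copies three whole items, whereas cpy_num == 3 with
 * cpy_bytes == 100 copies two whole items and then splits off 100 bytes
 * (or, for a directory item, 100 entries) of the third via leaf_item_bottle().
 */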
/* there are types of coping: from S[0] to L[0], from S[0] to R[0],
from R[0] to L[0]. for each of these we have to define parent and
positions of destination and source buffers */
static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,
struct buffer_info *dest_bi,
struct buffer_info *src_bi,
int *first_last,
struct buffer_head *Snew)
{
memset(dest_bi, 0, sizeof(struct buffer_info));
memset(src_bi, 0, sizeof(struct buffer_info));
/* define dest, src, dest parent, dest position */
switch (shift_mode) {
case LEAF_FROM_S_TO_L: /* it is used in leaf_shift_left */
src_bi->tb = tb;
src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0); /* src->b_item_order */
dest_bi->tb = tb;
dest_bi->bi_bh = tb->L[0];
dest_bi->bi_parent = tb->FL[0];
dest_bi->bi_position = get_left_neighbor_position(tb, 0);
*first_last = FIRST_TO_LAST;
break;
case LEAF_FROM_S_TO_R: /* it is used in leaf_shift_right */
src_bi->tb = tb;
src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
dest_bi->tb = tb;
dest_bi->bi_bh = tb->R[0];
dest_bi->bi_parent = tb->FR[0];
dest_bi->bi_position = get_right_neighbor_position(tb, 0);
*first_last = LAST_TO_FIRST;
break;
case LEAF_FROM_R_TO_L: /* it is used in balance_leaf_when_delete */
src_bi->tb = tb;
src_bi->bi_bh = tb->R[0];
src_bi->bi_parent = tb->FR[0];
src_bi->bi_position = get_right_neighbor_position(tb, 0);
dest_bi->tb = tb;
dest_bi->bi_bh = tb->L[0];
dest_bi->bi_parent = tb->FL[0];
dest_bi->bi_position = get_left_neighbor_position(tb, 0);
*first_last = FIRST_TO_LAST;
break;
case LEAF_FROM_L_TO_R: /* it is used in balance_leaf_when_delete */
src_bi->tb = tb;
src_bi->bi_bh = tb->L[0];
src_bi->bi_parent = tb->FL[0];
src_bi->bi_position = get_left_neighbor_position(tb, 0);
dest_bi->tb = tb;
dest_bi->bi_bh = tb->R[0];
dest_bi->bi_parent = tb->FR[0];
dest_bi->bi_position = get_right_neighbor_position(tb, 0);
*first_last = LAST_TO_FIRST;
break;
case LEAF_FROM_S_TO_SNEW:
src_bi->tb = tb;
src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);
dest_bi->tb = tb;
dest_bi->bi_bh = Snew;
dest_bi->bi_parent = NULL;
dest_bi->bi_position = 0;
*first_last = LAST_TO_FIRST;
break;
default:
reiserfs_panic(sb_from_bi(src_bi), "vs-10250",
"shift type is unknown (%d)", shift_mode);
}
RFALSE(!src_bi->bi_bh || !dest_bi->bi_bh,
"vs-10260: mode==%d, source (%p) or dest (%p) buffer is initialized incorrectly",
shift_mode, src_bi->bi_bh, dest_bi->bi_bh);
}
/* copy mov_num items and mov_bytes of the (mov_num-1)th item to the
neighbor. Delete them from the source */
int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num,
int mov_bytes, struct buffer_head *Snew)
{
int ret_value;
struct buffer_info dest_bi, src_bi;
int first_last;
leaf_define_dest_src_infos(shift_mode, tb, &dest_bi, &src_bi,
&first_last, Snew);
ret_value =
leaf_copy_items(&dest_bi, src_bi.bi_bh, first_last, mov_num,
mov_bytes);
leaf_delete_items(&src_bi, first_last,
(first_last ==
FIRST_TO_LAST) ? 0 : (B_NR_ITEMS(src_bi.bi_bh) -
mov_num), mov_num, mov_bytes);
return ret_value;
}
/* Shift shift_num items (and shift_bytes of last shifted item if shift_bytes != -1)
from S[0] to L[0] and replace the delimiting key */
int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes)
{
struct buffer_head *S0 = PATH_PLAST_BUFFER(tb->tb_path);
int i;
/* move shift_num (and shift_bytes bytes) items from S[0] to left neighbor L[0] */
i = leaf_move_items(LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL);
if (shift_num) {
if (B_NR_ITEMS(S0) == 0) { /* number of items in S[0] == 0 */
RFALSE(shift_bytes != -1,
"vs-10270: S0 is empty now, but shift_bytes != -1 (%d)",
shift_bytes);
#ifdef CONFIG_REISERFS_CHECK
if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) {
print_cur_tb("vs-10275");
reiserfs_panic(tb->tb_sb, "vs-10275",
"balance condition corrupted "
"(%c)", tb->tb_mode);
}
#endif
if (PATH_H_POSITION(tb->tb_path, 1) == 0)
replace_key(tb, tb->CFL[0], tb->lkey[0],
PATH_H_PPARENT(tb->tb_path, 0), 0);
} else {
/* replace lkey in CFL[0] by 0-th key from S[0]; */
replace_key(tb, tb->CFL[0], tb->lkey[0], S0, 0);
RFALSE((shift_bytes != -1 &&
!(is_direntry_le_ih(B_N_PITEM_HEAD(S0, 0))
&& !I_ENTRY_COUNT(B_N_PITEM_HEAD(S0, 0)))) &&
(!op_is_left_mergeable
(B_N_PKEY(S0, 0), S0->b_size)),
"vs-10280: item must be mergeable");
}
}
return i;
}
/* CLEANING STOPPED HERE */
/* Shift shift_num (shift_bytes) items from S[0] to the right neighbor, and replace the delimiting key */
int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes)
{
// struct buffer_head * S0 = PATH_PLAST_BUFFER (tb->tb_path);
int ret_value;
/* move shift_num (and shift_bytes) items from S[0] to right neighbor R[0] */
ret_value =
leaf_move_items(LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL);
/* replace rkey in CFR[0] by the 0-th key from R[0] */
if (shift_num) {
replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
}
return ret_value;
}
static void leaf_delete_items_entirely(struct buffer_info *bi,
int first, int del_num);
/* If del_bytes == -1, then starting from position 'first' delete del_num whole items from buffer CUR.
Otherwise:
If last_first == 0: starting from position 'first' delete del_num-1 whole items. Delete part of the body of
the first item; the part is defined by del_bytes. Don't delete the first item header.
If last_first == 1: starting from position 'first+1' delete del_num-1 whole items. Delete part of the body of
the last item; the part is defined by del_bytes. Don't delete the last item header.
*/
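/*
* Added summary (not part of the original source) of what the code below
* does when del_bytes != -1: for FIRST_TO_LAST it removes del_num-1 whole
* items and then cuts del_bytes from the head of what is then item 0; for
* LAST_TO_FIRST it removes del_num-1 whole items starting at first+1 and
* then cuts del_bytes (bytes for a regular item, entries for a directory
* item) from the tail of the last item in the buffer.
*/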
void leaf_delete_items(struct buffer_info *cur_bi, int last_first,
int first, int del_num, int del_bytes)
{
struct buffer_head *bh;
int item_amount = B_NR_ITEMS(bh = cur_bi->bi_bh);
RFALSE(!bh, "10155: bh is not defined");
RFALSE(del_num < 0, "10160: del_num can not be < 0. del_num==%d",
del_num);
RFALSE(first < 0
|| first + del_num > item_amount,
"10165: invalid number of first item to be deleted (%d) or "
"no so much items (%d) to delete (only %d)", first,
first + del_num, item_amount);
if (del_num == 0)
return;
if (first == 0 && del_num == item_amount && del_bytes == -1) {
make_empty_node(cur_bi);
do_balance_mark_leaf_dirty(cur_bi->tb, bh, 0);
return;
}
if (del_bytes == -1)
/* delete del_num items beginning from item in position first */
leaf_delete_items_entirely(cur_bi, first, del_num);
else {
if (last_first == FIRST_TO_LAST) {
/* delete del_num-1 items beginning from item in position first */
leaf_delete_items_entirely(cur_bi, first, del_num - 1);
/* delete the part of the first item of the bh
do not delete item header
*/
leaf_cut_from_buffer(cur_bi, 0, 0, del_bytes);
} else {
struct item_head *ih;
int len;
/* delete del_num-1 items beginning from item in position first+1 */
leaf_delete_items_entirely(cur_bi, first + 1,
del_num - 1);
ih = B_N_PITEM_HEAD(bh, B_NR_ITEMS(bh) - 1);
if (is_direntry_le_ih(ih))
/* the last item is directory */
/* len = numbers of directory entries in this item */
len = ih_entry_count(ih);
else
/* len = body len of item */
len = ih_item_len(ih);
/* delete the part of the last item of the bh
do not delete item header
*/
leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
len - del_bytes, del_bytes);
}
}
}
/* insert a new item into the leaf node at position 'before' */
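/*
* Layout reminder (added note, not part of the original source): a leaf
* block starts with a block_head followed by the array of item headers,
* while item bodies are packed at the end of the block and grow towards
* the headers; ih_location() is the byte offset of an item's body within
* the block.  The memmove()s below open a gap for the new body and for
* the new item header.
*/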
void leaf_insert_into_buf(struct buffer_info *bi, int before,
struct item_head *inserted_item_ih,
const char *inserted_item_body, int zeros_number)
{
struct buffer_head *bh = bi->bi_bh;
int nr, free_space;
struct block_head *blkh;
struct item_head *ih;
int i;
int last_loc, unmoved_loc;
char *to;
blkh = B_BLK_HEAD(bh);
nr = blkh_nr_item(blkh);
free_space = blkh_free_space(blkh);
/* check free space */
RFALSE(free_space < ih_item_len(inserted_item_ih) + IH_SIZE,
"vs-10170: not enough free space in block %z, new item %h",
bh, inserted_item_ih);
RFALSE(zeros_number > ih_item_len(inserted_item_ih),
"vs-10172: zero number == %d, item length == %d",
zeros_number, ih_item_len(inserted_item_ih));
/* get the item before which the new item must be inserted */
ih = B_N_PITEM_HEAD(bh, before);
/* prepare space for the body of new item */
last_loc = nr ? ih_location(&(ih[nr - before - 1])) : bh->b_size;
unmoved_loc = before ? ih_location(ih - 1) : bh->b_size;
memmove(bh->b_data + last_loc - ih_item_len(inserted_item_ih),
bh->b_data + last_loc, unmoved_loc - last_loc);
to = bh->b_data + unmoved_loc - ih_item_len(inserted_item_ih);
memset(to, 0, zeros_number);
to += zeros_number;
/* copy body to prepared space */
if (inserted_item_body)
memmove(to, inserted_item_body,
ih_item_len(inserted_item_ih) - zeros_number);
else
memset(to, '\0', ih_item_len(inserted_item_ih) - zeros_number);
/* insert item header */
memmove(ih + 1, ih, IH_SIZE * (nr - before));
memmove(ih, inserted_item_ih, IH_SIZE);
/* change locations */
for (i = before; i < nr + 1; i++) {
unmoved_loc -= ih_item_len(&(ih[i - before]));
put_ih_location(&(ih[i - before]), unmoved_loc);
}
/* sizes, free space, item number */
set_blkh_nr_item(blkh, blkh_nr_item(blkh) + 1);
set_blkh_free_space(blkh,
free_space - (IH_SIZE +
ih_item_len(inserted_item_ih)));
do_balance_mark_leaf_dirty(bi->tb, bh, 1);
if (bi->bi_parent) {
struct disk_child *t_dc;
t_dc = B_N_CHILD(bi->bi_parent, bi->bi_position);
put_dc_size(t_dc,
dc_size(t_dc) + (IH_SIZE +
ih_item_len(inserted_item_ih)));
do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
}
}
/* paste paste_size bytes to the affected_item_num-th item.
When the item is a directory, this only prepares space for new entries */
void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
int pos_in_item, int paste_size,
const char *body, int zeros_number)
{
struct buffer_head *bh = bi->bi_bh;
int nr, free_space;
struct block_head *blkh;
struct item_head *ih;
int i;
int last_loc, unmoved_loc;
blkh = B_BLK_HEAD(bh);
nr = blkh_nr_item(blkh);
free_space = blkh_free_space(blkh);
/* check free space */
RFALSE(free_space < paste_size,
"vs-10175: not enough free space: needed %d, available %d",
paste_size, free_space);
#ifdef CONFIG_REISERFS_CHECK
if (zeros_number > paste_size) {
struct super_block *sb = NULL;
if (bi && bi->tb)
sb = bi->tb->tb_sb;
print_cur_tb("10177");
reiserfs_panic(sb, "vs-10177",
"zeros_number == %d, paste_size == %d",
zeros_number, paste_size);
}
#endif /* CONFIG_REISERFS_CHECK */
/* item to be appended */
ih = B_N_PITEM_HEAD(bh, affected_item_num);
last_loc = ih_location(&(ih[nr - affected_item_num - 1]));
unmoved_loc = affected_item_num ? ih_location(ih - 1) : bh->b_size;
/* prepare space */
memmove(bh->b_data + last_loc - paste_size, bh->b_data + last_loc,
unmoved_loc - last_loc);
/* change locations */
for (i = affected_item_num; i < nr; i++)
put_ih_location(&(ih[i - affected_item_num]),
ih_location(&(ih[i - affected_item_num])) -
paste_size);
if (body) {
if (!is_direntry_le_ih(ih)) {
if (!pos_in_item) {
/* shift data to right */
memmove(bh->b_data + ih_location(ih) +
paste_size,
bh->b_data + ih_location(ih),
ih_item_len(ih));
/* paste data in the head of item */
memset(bh->b_data + ih_location(ih), 0,
zeros_number);
memcpy(bh->b_data + ih_location(ih) +
zeros_number, body,
paste_size - zeros_number);
} else {
memset(bh->b_data + unmoved_loc - paste_size, 0,
zeros_number);
memcpy(bh->b_data + unmoved_loc - paste_size +
zeros_number, body,
paste_size - zeros_number);
}
}
} else
memset(bh->b_data + unmoved_loc - paste_size, '\0', paste_size);
put_ih_item_len(ih, ih_item_len(ih) + paste_size);
/* change free space */
set_blkh_free_space(blkh, free_space - paste_size);
do_balance_mark_leaf_dirty(bi->tb, bh, 0);
if (bi->bi_parent) {
struct disk_child *t_dc =
B_N_CHILD(bi->bi_parent, bi->bi_position);
put_dc_size(t_dc, dc_size(t_dc) + paste_size);
do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
}
}
/* cuts DEL_COUNT entries beginning from FROM-th entry. Directory item
does not have free space, so it moves DEHs and remaining records as
necessary. Return value is size of removed part of directory item
in bytes. */
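/*
* Layout reminder (added note, not part of the original source): a
* directory item begins with an array of reiserfs_de_head entry headers
* followed by the entry records; records are packed from the end of the
* item backwards, so the record of entry 0 ends at ih_item_len(ih) and
* deh_location() gives the offset of each record within the item.
*/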
static int leaf_cut_entries(struct buffer_head *bh,
struct item_head *ih, int from, int del_count)
{
char *item;
struct reiserfs_de_head *deh;
int prev_record_offset; /* offset of the record that is (from-1)th */
char *prev_record; /* address of that record */
int cut_records_len; /* length of all removed records */
int i;
/* make sure that the item is a directory item and that there are enough
entries to remove */
RFALSE(!is_direntry_le_ih(ih), "10180: item is not directory item");
RFALSE(I_ENTRY_COUNT(ih) < from + del_count,
"10185: item contains not enough entries: entry_cout = %d, from = %d, to delete = %d",
I_ENTRY_COUNT(ih), from, del_count);
if (del_count == 0)
return 0;
/* first byte of item */
item = bh->b_data + ih_location(ih);
/* entry head array */
deh = B_I_DEH(bh, ih);
/* first byte of the remaining entries that are BEFORE the cut entries
(prev_record) and length of all removed records (cut_records_len) */
prev_record_offset =
(from ? deh_location(&(deh[from - 1])) : ih_item_len(ih));
cut_records_len = prev_record_offset /*from_record */ -
deh_location(&(deh[from + del_count - 1]));
prev_record = item + prev_record_offset;
/* adjust locations of remaining entries */
for (i = I_ENTRY_COUNT(ih) - 1; i > from + del_count - 1; i--)
put_deh_location(&(deh[i]),
deh_location(&deh[i]) -
(DEH_SIZE * del_count));
for (i = 0; i < from; i++)
put_deh_location(&(deh[i]),
deh_location(&deh[i]) - (DEH_SIZE * del_count +
cut_records_len));
put_ih_entry_count(ih, ih_entry_count(ih) - del_count);
/* shift the entry head array and the entries that are AFTER the removed entries */
memmove((char *)(deh + from),
deh + from + del_count,
prev_record - cut_records_len - (char *)(deh + from +
del_count));
/* shift the records that are BEFORE the removed entries */
memmove(prev_record - cut_records_len - DEH_SIZE * del_count,
prev_record, item + ih_item_len(ih) - prev_record);
return DEH_SIZE * del_count + cut_records_len;
}
/* when the cut item is part of a regular file:
pos_in_item - first byte that must be cut
cut_size - number of bytes to be cut beginning from pos_in_item
when the cut item is a directory item:
pos_in_item - number of the first deleted entry
cut_size - count of deleted entries
*/
void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
int pos_in_item, int cut_size)
{
int nr;
struct buffer_head *bh = bi->bi_bh;
struct block_head *blkh;
struct item_head *ih;
int last_loc, unmoved_loc;
int i;
blkh = B_BLK_HEAD(bh);
nr = blkh_nr_item(blkh);
/* item head of truncated item */
ih = B_N_PITEM_HEAD(bh, cut_item_num);
if (is_direntry_le_ih(ih)) {
/* first, cut the entries */
cut_size = leaf_cut_entries(bh, ih, pos_in_item, cut_size);
if (pos_in_item == 0) {
/* change key */
RFALSE(cut_item_num,
"when 0-th enrty of item is cut, that item must be first in the node, not %d-th",
cut_item_num);
/* change item key by key of first entry in the item */
set_le_ih_k_offset(ih, deh_offset(B_I_DEH(bh, ih)));
/*memcpy (&ih->ih_key.k_offset, &(B_I_DEH (bh, ih)->deh_offset), SHORT_KEY_SIZE); */
}
} else {
/* item is direct or indirect */
RFALSE(is_statdata_le_ih(ih), "10195: item is stat data");
RFALSE(pos_in_item && pos_in_item + cut_size != ih_item_len(ih),
"10200: invalid offset (%lu) or trunc_size (%lu) or ih_item_len (%lu)",
(long unsigned)pos_in_item, (long unsigned)cut_size,
(long unsigned)ih_item_len(ih));
/* shift item body to left if cut is from the head of item */
if (pos_in_item == 0) {
memmove(bh->b_data + ih_location(ih),
bh->b_data + ih_location(ih) + cut_size,
ih_item_len(ih) - cut_size);
/* change key of item */
if (is_direct_le_ih(ih))
set_le_ih_k_offset(ih,
le_ih_k_offset(ih) +
cut_size);
else {
set_le_ih_k_offset(ih,
le_ih_k_offset(ih) +
(cut_size / UNFM_P_SIZE) *
bh->b_size);
RFALSE(ih_item_len(ih) == cut_size
&& get_ih_free_space(ih),
"10205: invalid ih_free_space (%h)", ih);
}
}
}
/* location of the last item */
last_loc = ih_location(&(ih[nr - cut_item_num - 1]));
/* location of the item, which is remaining at the same place */
unmoved_loc = cut_item_num ? ih_location(ih - 1) : bh->b_size;
/* shift */
memmove(bh->b_data + last_loc + cut_size, bh->b_data + last_loc,
unmoved_loc - last_loc - cut_size);
/* change item length */
put_ih_item_len(ih, ih_item_len(ih) - cut_size);
if (is_indirect_le_ih(ih)) {
if (pos_in_item)
set_ih_free_space(ih, 0);
}
/* change locations */
for (i = cut_item_num; i < nr; i++)
put_ih_location(&(ih[i - cut_item_num]),
ih_location(&ih[i - cut_item_num]) + cut_size);
/* size, free space */
set_blkh_free_space(blkh, blkh_free_space(blkh) + cut_size);
do_balance_mark_leaf_dirty(bi->tb, bh, 0);
if (bi->bi_parent) {
struct disk_child *t_dc;
t_dc = B_N_CHILD(bi->bi_parent, bi->bi_position);
put_dc_size(t_dc, dc_size(t_dc) - cut_size);
do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
}
}
/* delete del_num items from buffer starting from the first'th item */
static void leaf_delete_items_entirely(struct buffer_info *bi,
int first, int del_num)
{
struct buffer_head *bh = bi->bi_bh;
int nr;
int i, j;
int last_loc, last_removed_loc;
struct block_head *blkh;
struct item_head *ih;
RFALSE(bh == NULL, "10210: buffer is 0");
RFALSE(del_num < 0, "10215: del_num less than 0 (%d)", del_num);
if (del_num == 0)
return;
blkh = B_BLK_HEAD(bh);
nr = blkh_nr_item(blkh);
RFALSE(first < 0 || first + del_num > nr,
"10220: first=%d, number=%d, there is %d items", first, del_num,
nr);
if (first == 0 && del_num == nr) {
/* this does not work */
make_empty_node(bi);
do_balance_mark_leaf_dirty(bi->tb, bh, 0);
return;
}
ih = B_N_PITEM_HEAD(bh, first);
/* location of unmovable item */
j = (first == 0) ? bh->b_size : ih_location(ih - 1);
/* delete items */
last_loc = ih_location(&(ih[nr - 1 - first]));
last_removed_loc = ih_location(&(ih[del_num - 1]));
memmove(bh->b_data + last_loc + j - last_removed_loc,
bh->b_data + last_loc, last_removed_loc - last_loc);
/* delete item headers */
memmove(ih, ih + del_num, (nr - first - del_num) * IH_SIZE);
/* change item location */
for (i = first; i < nr - del_num; i++)
put_ih_location(&(ih[i - first]),
ih_location(&(ih[i - first])) + (j -
last_removed_loc));
/* sizes, item number */
set_blkh_nr_item(blkh, blkh_nr_item(blkh) - del_num);
set_blkh_free_space(blkh,
blkh_free_space(blkh) + (j - last_removed_loc +
IH_SIZE * del_num));
do_balance_mark_leaf_dirty(bi->tb, bh, 0);
if (bi->bi_parent) {
struct disk_child *t_dc =
B_N_CHILD(bi->bi_parent, bi->bi_position);
put_dc_size(t_dc,
dc_size(t_dc) - (j - last_removed_loc +
IH_SIZE * del_num));
do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0);
}
}
/* paste new_entry_count entries (new_dehs, records) at position 'before' within the item_num-th item */
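/*
* Added note (not part of the original source): paste_size covers both the
* new entry headers and their records, i.e.
* paste_size == DEH_SIZE * new_entry_count + length of 'records', and the
* item must already have been grown by paste_size (see
* leaf_paste_in_buffer() above).
*/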
void leaf_paste_entries(struct buffer_info *bi,
int item_num,
int before,
int new_entry_count,
struct reiserfs_de_head *new_dehs,
const char *records, int paste_size)
{
struct item_head *ih;
char *item;
struct reiserfs_de_head *deh;
char *insert_point;
int i, old_entry_num;
struct buffer_head *bh = bi->bi_bh;
if (new_entry_count == 0)
return;
ih = B_N_PITEM_HEAD(bh, item_num);
/* make sure that the item is a directory item and there are enough records in it */
RFALSE(!is_direntry_le_ih(ih), "10225: item is not directory item");
RFALSE(I_ENTRY_COUNT(ih) < before,
"10230: there are no entry we paste entries before. entry_count = %d, before = %d",
I_ENTRY_COUNT(ih), before);
/* first byte of dest item */
item = bh->b_data + ih_location(ih);
/* entry head array */
deh = B_I_DEH(bh, ih);
/* new records will be pasted at this point */
insert_point =
item +
(before ? deh_location(&(deh[before - 1]))
: (ih_item_len(ih) - paste_size));
/* adjust locations of records that will be AFTER new records */
for (i = I_ENTRY_COUNT(ih) - 1; i >= before; i--)
put_deh_location(&(deh[i]),
deh_location(&(deh[i])) +
(DEH_SIZE * new_entry_count));
/* adjust locations of records that will be BEFORE new records */
for (i = 0; i < before; i++)
put_deh_location(&(deh[i]),
deh_location(&(deh[i])) + paste_size);
old_entry_num = I_ENTRY_COUNT(ih);
put_ih_entry_count(ih, ih_entry_count(ih) + new_entry_count);
/* prepare space for pasted records */
memmove(insert_point + paste_size, insert_point,
item + (ih_item_len(ih) - paste_size) - insert_point);
/* copy new records */
memcpy(insert_point + DEH_SIZE * new_entry_count, records,
paste_size - DEH_SIZE * new_entry_count);
/* prepare space for new entry heads */
deh += before;
memmove((char *)(deh + new_entry_count), deh,
insert_point - (char *)deh);
/* copy new entry heads */
deh = (struct reiserfs_de_head *)((char *)deh);
memcpy(deh, new_dehs, DEH_SIZE * new_entry_count);
/* set locations of new records */
for (i = 0; i < new_entry_count; i++) {
put_deh_location(&(deh[i]),
deh_location(&(deh[i])) +
(-deh_location
(&(new_dehs[new_entry_count - 1])) +
insert_point + DEH_SIZE * new_entry_count -
item));
}
/* change the item key if necessary (when we paste before the 0-th entry) */
if (!before) {
set_le_ih_k_offset(ih, deh_offset(new_dehs));
/* memcpy (&ih->ih_key.k_offset,
&new_dehs->deh_offset, SHORT_KEY_SIZE);*/
}
#ifdef CONFIG_REISERFS_CHECK
{
int prev, next;
/* check record locations */
deh = B_I_DEH(bh, ih);
for (i = 0; i < I_ENTRY_COUNT(ih); i++) {
next =
(i <
I_ENTRY_COUNT(ih) -
1) ? deh_location(&(deh[i + 1])) : 0;
prev = (i != 0) ? deh_location(&(deh[i - 1])) : 0;
if (prev && prev <= deh_location(&(deh[i])))
reiserfs_error(sb_from_bi(bi), "vs-10240",
"directory item (%h) "
"corrupted (prev %a, "
"cur(%d) %a)",
ih, deh + i - 1, i, deh + i);
if (next && next >= deh_location(&(deh[i])))
reiserfs_error(sb_from_bi(bi), "vs-10250",
"directory item (%h) "
"corrupted (cur(%d) %a, "
"next %a)",
ih, i, deh + i, deh + i + 1);
}
}
#endif
}
| gpl-2.0 |
piasek1906/Piasek-KK | drivers/usb/atm/ueagle-atm.c | 4860 | 68475 | /*-
* Copyright (c) 2003, 2004
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
*
* Copyright (c) 2005-2007 Matthieu Castet <castet.matthieu@free.fr>
* Copyright (c) 2005-2007 Stanislaw Gruszka <stf_xl@wp.pl>
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* GPL license :
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*
* HISTORY : some part of the code was based on the ueagle 1.3 BSD driver;
* Damien Bergamini agreed to put his code under a DUAL GPL/BSD license.
*
* The rest of the code was rewritten from scratch.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include "usbatm.h"
#define EAGLEUSBVERSION "ueagle 1.4"
/*
* Debug macros
*/
#define uea_dbg(usb_dev, format, args...) \
do { \
if (debug >= 1) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm dbg] %s: " format, \
__func__, ##args); \
} while (0)
#define uea_vdbg(usb_dev, format, args...) \
do { \
if (debug >= 2) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm vdbg] " format, ##args); \
} while (0)
#define uea_enters(usb_dev) \
uea_vdbg(usb_dev, "entering %s\n" , __func__)
#define uea_leaves(usb_dev) \
uea_vdbg(usb_dev, "leaving %s\n" , __func__)
#define uea_err(usb_dev, format, args...) \
dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)
#define uea_warn(usb_dev, format, args...) \
dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)
#define uea_info(usb_dev, format, args...) \
dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)
struct intr_pkt;
/* cmv's from firmware */
struct uea_cmvs_v1 {
u32 address;
u16 offset;
u32 data;
} __packed;
struct uea_cmvs_v2 {
u32 group;
u32 address;
u32 offset;
u32 data;
} __packed;
/* information about currently processed cmv */
struct cmv_dsc_e1 {
u8 function;
u16 idx;
u32 address;
u16 offset;
};
struct cmv_dsc_e4 {
u16 function;
u16 offset;
u16 address;
u16 group;
};
union cmv_dsc {
struct cmv_dsc_e1 e1;
struct cmv_dsc_e4 e4;
};
struct uea_softc {
struct usb_device *usb_dev;
struct usbatm_data *usbatm;
int modem_index;
unsigned int driver_info;
int annex;
#define ANNEXA 0
#define ANNEXB 1
int booting;
int reset;
wait_queue_head_t sync_q;
struct task_struct *kthread;
u32 data;
u32 data1;
int cmv_ack;
union cmv_dsc cmv_dsc;
struct work_struct task;
u16 pageno;
u16 ovl;
const struct firmware *dsp_firm;
struct urb *urb_int;
void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *);
void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *);
int (*stat) (struct uea_softc *);
int (*send_cmvs) (struct uea_softc *);
/* keep in sync with eaglectl */
struct uea_stats {
struct {
u32 state;
u32 flags;
u32 mflags;
u32 vidcpe;
u32 vidco;
u32 dsrate;
u32 usrate;
u32 dsunc;
u32 usunc;
u32 dscorr;
u32 uscorr;
u32 txflow;
u32 rxflow;
u32 usattenuation;
u32 dsattenuation;
u32 dsmargin;
u32 usmargin;
u32 firmid;
} phy;
} stats;
};
/*
* Elsa IDs
*/
#define ELSA_VID 0x05CC
#define ELSA_PID_PSTFIRM 0x3350
#define ELSA_PID_PREFIRM 0x3351
#define ELSA_PID_A_PREFIRM 0x3352
#define ELSA_PID_A_PSTFIRM 0x3353
#define ELSA_PID_B_PREFIRM 0x3362
#define ELSA_PID_B_PSTFIRM 0x3363
/*
* Devolo IDs : pots if (pid & 0x10)
*/
#define DEVOLO_VID 0x1039
#define DEVOLO_EAGLE_I_A_PID_PSTFIRM 0x2110
#define DEVOLO_EAGLE_I_A_PID_PREFIRM 0x2111
#define DEVOLO_EAGLE_I_B_PID_PSTFIRM 0x2100
#define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101
#define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130
#define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131
#define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120
#define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121
/*
* Reference design USB IDs
*/
#define ANALOG_VID 0x1110
#define ADI930_PID_PREFIRM 0x9001
#define ADI930_PID_PSTFIRM 0x9000
#define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */
#define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */
#define EAGLE_IIC_PID_PREFIRM 0x9024 /* Eagle IIC */
#define EAGLE_IIC_PID_PSTFIRM 0x9023 /* Eagle IIC */
#define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */
#define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */
#define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */
#define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */
#define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */
#define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */
/*
* USR USB IDs
*/
#define USR_VID 0x0BAF
#define MILLER_A_PID_PREFIRM 0x00F2
#define MILLER_A_PID_PSTFIRM 0x00F1
#define MILLER_B_PID_PREFIRM 0x00FA
#define MILLER_B_PID_PSTFIRM 0x00F9
#define HEINEKEN_A_PID_PREFIRM 0x00F6
#define HEINEKEN_A_PID_PSTFIRM 0x00F5
#define HEINEKEN_B_PID_PREFIRM 0x00F8
#define HEINEKEN_B_PID_PSTFIRM 0x00F7
#define PREFIRM 0
#define PSTFIRM (1<<7)
#define AUTO_ANNEX_A (1<<8)
#define AUTO_ANNEX_B (1<<9)
enum {
ADI930 = 0,
EAGLE_I,
EAGLE_II,
EAGLE_III,
EAGLE_IV
};
/* macros for both struct usb_device_id and struct uea_softc */
#define UEA_IS_PREFIRM(x) \
(!((x)->driver_info & PSTFIRM))
#define UEA_CHIP_VERSION(x) \
((x)->driver_info & 0xf)
#define IS_ISDN(x) \
((x)->annex & ANNEXB)
#define INS_TO_USBDEV(ins) (ins->usb_dev)
#define GET_STATUS(data) \
((data >> 8) & 0xf)
#define IS_OPERATIONAL(sc) \
((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \
(GET_STATUS(sc->stats.phy.state) == 2) : \
(sc->stats.phy.state == 7))
/*
* Set of macros to handle unaligned data in the firmware blob.
* The FW_GET_BYTE() macro is provided only for consistency.
*/
#define FW_GET_BYTE(p) (*((__u8 *) (p)))
#define FW_DIR "ueagle-atm/"
#define UEA_FW_NAME_MAX 30
#define NB_MODEM 4
#define BULK_TIMEOUT 300
#define CTRL_TIMEOUT 1000
#define ACK_TIMEOUT msecs_to_jiffies(3000)
#define UEA_INTR_IFACE_NO 0
#define UEA_US_IFACE_NO 1
#define UEA_DS_IFACE_NO 2
#define FASTEST_ISO_INTF 8
#define UEA_BULK_DATA_PIPE 0x02
#define UEA_IDMA_PIPE 0x04
#define UEA_INTR_PIPE 0x04
#define UEA_ISO_DATA_PIPE 0x08
#define UEA_E1_SET_BLOCK 0x0001
#define UEA_E4_SET_BLOCK 0x002c
#define UEA_SET_MODE 0x0003
#define UEA_SET_2183_DATA 0x0004
#define UEA_SET_TIMEOUT 0x0011
#define UEA_LOOPBACK_OFF 0x0002
#define UEA_LOOPBACK_ON 0x0003
#define UEA_BOOT_IDMA 0x0006
#define UEA_START_RESET 0x0007
#define UEA_END_RESET 0x0008
#define UEA_SWAP_MAILBOX (0x3fcd | 0x4000)
#define UEA_MPTX_START (0x3fce | 0x4000)
#define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000)
#define UEA_MPRX_MAILBOX (0x3fdf | 0x4000)
/* block information in eagle4 dsp firmware */
struct block_index {
__le32 PageOffset;
__le32 NotLastBlock;
__le32 dummy;
__le32 PageSize;
__le32 PageAddress;
__le16 dummy1;
__le16 PageNumber;
} __packed;
#define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000)
#define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4)
#define E4_L1_STRING_HEADER 0x10
#define E4_MAX_PAGE_NUMBER 0x58
#define E4_NO_SWAPPAGE_HEADERS 0x31
/* l1_code is eagle4 dsp firmware format */
struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
u8 code[0];
} __packed;
/* structures describing a block within a DSP page */
struct block_info_e1 {
__le16 wHdr;
__le16 wAddress;
__le16 wSize;
__le16 wOvlOffset;
__le16 wOvl; /* overlay */
__le16 wLast;
} __packed;
#define E1_BLOCK_INFO_SIZE 12
struct block_info_e4 {
__be16 wHdr;
__u8 bBootPage;
__u8 bPageNumber;
__be32 dwSize;
__be32 dwAddress;
__be16 wReserved;
} __packed;
#define E4_BLOCK_INFO_SIZE 14
#define UEA_BIHDR 0xabcd
#define UEA_RESERVED 0xffff
/* constants describing cmv type */
#define E1_PREAMBLE 0x535c
#define E1_MODEMTOHOST 0x01
#define E1_HOSTTOMODEM 0x10
#define E1_MEMACCESS 0x1
#define E1_ADSLDIRECTIVE 0x7
#define E1_FUNCTION_TYPE(f) ((f) >> 4)
#define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f)
#define E4_MEMACCESS 0
#define E4_ADSLDIRECTIVE 0xf
#define E4_FUNCTION_TYPE(f) ((f) >> 8)
#define E4_FUNCTION_SIZE(f) ((f) & 0x0f)
#define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f)
/* for MEMACCESS */
#define E1_REQUESTREAD 0x0
#define E1_REQUESTWRITE 0x1
#define E1_REPLYREAD 0x2
#define E1_REPLYWRITE 0x3
#define E4_REQUESTREAD 0x0
#define E4_REQUESTWRITE 0x4
#define E4_REPLYREAD (E4_REQUESTREAD | 1)
#define E4_REPLYWRITE (E4_REQUESTWRITE | 1)
/* for ADSLDIRECTIVE */
#define E1_KERNELREADY 0x0
#define E1_MODEMREADY 0x1
#define E4_KERNELREADY 0x0
#define E4_MODEMREADY 0x1
#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
((st) & 0xf) << 4 | ((s) & 0xf))
#define E1_MAKESA(a, b, c, d) \
(((c) & 0xff) << 24 | \
((d) & 0xff) << 16 | \
((a) & 0xff) << 8 | \
((b) & 0xff))
#define E1_GETSA1(a) ((a >> 8) & 0xff)
#define E1_GETSA2(a) (a & 0xff)
#define E1_GETSA3(a) ((a >> 24) & 0xff)
#define E1_GETSA4(a) ((a >> 16) & 0xff)
#define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L')
#define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G')
#define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O')
#define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N')
#define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E')
#define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T')
#define E4_SA_CNTL 1
#define E4_SA_STAT 2
#define E4_SA_INFO 3
#define E4_SA_TEST 4
#define E4_SA_OPTN 5
#define E4_SA_RATE 6
#define E4_SA_DIAG 7
#define E4_SA_CNFG 8
/* structures representing a CMV (Configuration and Management Variable) */
struct cmv_e1 {
__le16 wPreamble;
__u8 bDirection;
__u8 bFunction;
__le16 wIndex;
__le32 dwSymbolicAddress;
__le16 wOffsetAddress;
__le32 dwData;
} __packed;
struct cmv_e4 {
__be16 wGroup;
__be16 wFunction;
__be16 wOffset;
__be16 wAddress;
__be32 dwData[6];
} __packed;
/* structures representing swap information */
struct swap_info_e1 {
__u8 bSwapPageNo;
__u8 bOvl; /* overlay */
} __packed;
struct swap_info_e4 {
__u8 bSwapPageNo;
} __packed;
/* structures representing interrupt data */
#define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo
#define e1_bOvl u.e1.s1.swapinfo.bOvl
#define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo
#define INT_LOADSWAPPAGE 0x0001
#define INT_INCOMINGCMV 0x0002
union intr_data_e1 {
struct {
struct swap_info_e1 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e1 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
union intr_data_e4 {
struct {
struct swap_info_e4 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e4 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
struct intr_pkt {
__u8 bType;
__u8 bNotification;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
__le16 wInterrupt;
union {
union intr_data_e1 e1;
union intr_data_e4 e4;
} u;
} __packed;
#define E1_INTR_PKT_SIZE 28
#define E4_INTR_PKT_SIZE 64
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
static const char * const chip_name[] = {
"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
static int modem_index;
static unsigned int debug;
static unsigned int altsetting[NB_MODEM] = {
[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
static bool sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)");
module_param_array(altsetting, uint, NULL, 0644);
MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, "
"1=isoc slowest, ... , 8=isoc fastest (default)");
module_param_array(sync_wait, bool, NULL, 0644);
MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM");
module_param_array(cmv_file, charp, NULL, 0644);
MODULE_PARM_DESC(cmv_file,
"file name with configuration and management variables");
module_param_array(annex, uint, NULL, 0644);
MODULE_PARM_DESC(annex,
"manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
#define uea_wait(sc, cond, timeo) \
({ \
int _r = wait_event_interruptible_timeout(sc->sync_q, \
(cond) || kthread_should_stop(), timeo); \
if (kthread_should_stop()) \
_r = -ENODEV; \
_r; \
})
#define UPDATE_ATM_STAT(type, val) \
do { \
if (sc->usbatm->atm_dev) \
sc->usbatm->atm_dev->type = val; \
} while (0)
#define UPDATE_ATM_SIGNAL(val) \
do { \
if (sc->usbatm->atm_dev) \
atm_dev_signal_change(sc->usbatm->atm_dev, val); \
} while (0)
/* Firmware loading */
#define LOAD_INTERNAL 0xA0
#define F8051_USBCS 0x7f92
/**
* uea_send_modem_cmd - Send a command for pre-firmware devices.
*/
static int uea_send_modem_cmd(struct usb_device *usb,
u16 addr, u16 size, const u8 *buff)
{
int ret = -ENOMEM;
u8 *xfer_buff;
xfer_buff = kmemdup(buff, size, GFP_KERNEL);
if (xfer_buff) {
ret = usb_control_msg(usb,
usb_sndctrlpipe(usb, 0),
LOAD_INTERNAL,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, addr, 0, xfer_buff,
size, CTRL_TIMEOUT);
kfree(xfer_buff);
}
if (ret < 0)
return ret;
return (ret == size) ? 0 : -EIO;
}
static void uea_upload_pre_firmware(const struct firmware *fw_entry,
void *context)
{
struct usb_device *usb = context;
const u8 *pfw;
u8 value;
u32 crc = 0;
int ret, size;
uea_enters(usb);
if (!fw_entry) {
uea_err(usb, "firmware is not available\n");
goto err;
}
pfw = fw_entry->data;
size = fw_entry->size;
if (size < 4)
goto err_fw_corrupted;
crc = get_unaligned_le32(pfw);
pfw += 4;
size -= 4;
if (crc32_be(0, pfw, size) != crc)
goto err_fw_corrupted;
/*
* Start to upload firmware : send reset
*/
value = 1;
ret = uea_send_modem_cmd(usb, F8051_USBCS, sizeof(value), &value);
if (ret < 0) {
uea_err(usb, "modem reset failed with error %d\n", ret);
goto err;
}
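/*
* Added note (not part of the original source): after the 32-bit
* little-endian CRC checked above, the image is a sequence of records,
* each made of a 1-byte length, a 2-byte little-endian load address and
* 'length' data bytes; the loop below sends them one by one with
* uea_send_modem_cmd().
*/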
while (size > 3) {
u8 len = FW_GET_BYTE(pfw);
u16 add = get_unaligned_le16(pfw + 1);
size -= len + 3;
if (size < 0)
goto err_fw_corrupted;
ret = uea_send_modem_cmd(usb, add, len, pfw + 3);
if (ret < 0) {
uea_err(usb, "uploading firmware data failed "
"with error %d\n", ret);
goto err;
}
pfw += len + 3;
}
if (size != 0)
goto err_fw_corrupted;
/*
* Tell the modem we are finished : de-assert reset
*/
value = 0;
ret = uea_send_modem_cmd(usb, F8051_USBCS, 1, &value);
if (ret < 0)
uea_err(usb, "modem de-assert failed with error %d\n", ret);
else
uea_info(usb, "firmware uploaded\n");
goto err;
err_fw_corrupted:
uea_err(usb, "firmware is corrupted\n");
err:
release_firmware(fw_entry);
uea_leaves(usb);
}
/**
* uea_load_firmware - Load usb firmware for pre-firmware devices.
*/
static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
{
int ret;
char *fw_name = FW_DIR "eagle.fw";
uea_enters(usb);
uea_info(usb, "pre-firmware device, uploading firmware\n");
switch (ver) {
case ADI930:
fw_name = FW_DIR "adi930.fw";
break;
case EAGLE_I:
fw_name = FW_DIR "eagleI.fw";
break;
case EAGLE_II:
fw_name = FW_DIR "eagleII.fw";
break;
case EAGLE_III:
fw_name = FW_DIR "eagleIII.fw";
break;
case EAGLE_IV:
fw_name = FW_DIR "eagleIV.fw";
break;
}
ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
GFP_KERNEL, usb,
uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
uea_info(usb, "loading firmware %s\n", fw_name);
uea_leaves(usb);
return ret;
}
/* modem management : dsp firmware, send/read CMV, monitoring statistics
*/
/*
* Make sure that the DSP code provided is safe to use.
*/
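/*
* Added note (not part of the original source): the e1 DSP image starts
* with a 1-byte page count followed by 'pagecount' 32-bit little-endian
* page offsets; each page holds a 1-byte block count followed by blocks of
* { 16-bit address, 16-bit size, data }.  This checker only verifies that
* every offset and size stays within the image.
*/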
static int check_dsp_e1(const u8 *dsp, unsigned int len)
{
u8 pagecount, blockcount;
u16 blocksize;
u32 pageoffset;
unsigned int i, j, p, pp;
pagecount = FW_GET_BYTE(dsp);
p = 1;
/* enough space for page offsets? */
if (p + 4 * pagecount > len)
return 1;
for (i = 0; i < pagecount; i++) {
pageoffset = get_unaligned_le32(dsp + p);
p += 4;
if (pageoffset == 0)
continue;
/* enough space for blockcount? */
if (pageoffset >= len)
return 1;
pp = pageoffset;
blockcount = FW_GET_BYTE(dsp + pp);
pp += 1;
for (j = 0; j < blockcount; j++) {
/* enough space for block header? */
if (pp + 4 > len)
return 1;
pp += 2; /* skip blockaddr */
blocksize = get_unaligned_le16(dsp + pp);
pp += 2;
/* enough space for block data? */
if (pp + blocksize > len)
return 1;
pp += blocksize;
}
}
return 0;
}
static int check_dsp_e4(const u8 *dsp, int len)
{
int i;
struct l1_code *p = (struct l1_code *) dsp;
unsigned int sum = p->code - dsp;
if (len < sum)
return 1;
if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 &&
strcmp("STRATIPHY ANEXB", p->string_header) != 0)
return 1;
for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) {
struct block_index *blockidx;
u8 blockno = p->page_number_to_block_index[i];
if (blockno >= E4_NO_SWAPPAGE_HEADERS)
continue;
do {
u64 l;
if (blockno >= E4_NO_SWAPPAGE_HEADERS)
return 1;
blockidx = &p->page_header[blockno++];
if ((u8 *)(blockidx + 1) - dsp >= len)
return 1;
if (le16_to_cpu(blockidx->PageNumber) != i)
return 1;
l = E4_PAGE_BYTES(blockidx->PageSize);
sum += l;
l += le32_to_cpu(blockidx->PageOffset);
if (l > len)
return 1;
/* zero is zero regardless of endianness */
} while (blockidx->NotLastBlock);
}
return (sum == len) ? 0 : 1;
}
/*
* send data to the idma pipe
*/
static int uea_idma_write(struct uea_softc *sc, const void *data, u32 size)
{
int ret = -ENOMEM;
u8 *xfer_buff;
int bytes_read;
xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
ret = usb_bulk_msg(sc->usb_dev,
usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
xfer_buff, size, &bytes_read, BULK_TIMEOUT);
kfree(xfer_buff);
if (ret < 0)
return ret;
if (size != bytes_read) {
uea_err(INS_TO_USBDEV(sc), "size != bytes_read %d %d\n", size,
bytes_read);
return -EIO;
}
return 0;
}
static int request_dsp(struct uea_softc *sc)
{
int ret;
char *dsp_name;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSP4i.bin";
else
dsp_name = FW_DIR "DSP4p.bin";
} else if (UEA_CHIP_VERSION(sc) == ADI930) {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSP9i.bin";
else
dsp_name = FW_DIR "DSP9p.bin";
} else {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSPei.bin";
else
dsp_name = FW_DIR "DSPep.bin";
}
ret = request_firmware(&sc->dsp_firm, dsp_name, &sc->usb_dev->dev);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
dsp_name, ret);
return ret;
}
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
ret = check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size);
else
ret = check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size);
if (ret) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
dsp_name);
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
return -EILSEQ;
}
return 0;
}
/*
* The uea_load_page() function must be called within a process context
*/
static void uea_load_page_e1(struct work_struct *work)
{
struct uea_softc *sc = container_of(work, struct uea_softc, task);
u16 pageno = sc->pageno;
u16 ovl = sc->ovl;
struct block_info_e1 bi;
const u8 *p;
u8 pagecount, blockcount;
u16 blockaddr, blocksize;
u32 pageoffset;
int i;
/* reload the firmware when a reboot starts and it is already loaded */
if (ovl == 0 && pageno == 0 && sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
return;
p = sc->dsp_firm->data;
pagecount = FW_GET_BYTE(p);
p += 1;
if (pageno >= pagecount)
goto bad1;
p += 4 * pageno;
pageoffset = get_unaligned_le32(p);
if (pageoffset == 0)
goto bad1;
p = sc->dsp_firm->data + pageoffset;
blockcount = FW_GET_BYTE(p);
p += 1;
uea_dbg(INS_TO_USBDEV(sc),
"sending %u blocks for DSP page %u\n", blockcount, pageno);
bi.wHdr = cpu_to_le16(UEA_BIHDR);
bi.wOvl = cpu_to_le16(ovl);
bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);
for (i = 0; i < blockcount; i++) {
blockaddr = get_unaligned_le16(p);
p += 2;
blocksize = get_unaligned_le16(p);
p += 2;
bi.wSize = cpu_to_le16(blocksize);
bi.wAddress = cpu_to_le16(blockaddr);
bi.wLast = cpu_to_le16((i == blockcount - 1) ? 1 : 0);
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE))
goto bad2;
/* send block data through the IDMA pipe */
if (uea_idma_write(sc, p, blocksize))
goto bad2;
p += blocksize;
}
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", i);
return;
bad1:
uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
}
static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
{
struct block_info_e4 bi;
struct block_index *blockidx;
struct l1_code *p = (struct l1_code *) sc->dsp_firm->data;
u8 blockno = p->page_number_to_block_index[pageno];
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = boot;
bi.bPageNumber = pageno;
bi.wReserved = cpu_to_be16(UEA_RESERVED);
do {
const u8 *blockoffset;
unsigned int blocksize;
blockidx = &p->page_header[blockno];
blocksize = E4_PAGE_BYTES(blockidx->PageSize);
blockoffset = sc->dsp_firm->data + le32_to_cpu(
blockidx->PageOffset);
bi.dwSize = cpu_to_be32(blocksize);
bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));
uea_dbg(INS_TO_USBDEV(sc),
"sending block %u for DSP page "
"%u size %u address %x\n",
blockno, pageno, blocksize,
le32_to_cpu(blockidx->PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
goto bad;
/* send block data through the IDMA pipe */
if (uea_idma_write(sc, blockoffset, blocksize))
goto bad;
blockno++;
} while (blockidx->NotLastBlock);
return;
bad:
uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno);
return;
}
static void uea_load_page_e4(struct work_struct *work)
{
struct uea_softc *sc = container_of(work, struct uea_softc, task);
u8 pageno = sc->pageno;
int i;
struct block_info_e4 bi;
struct l1_code *p;
uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno);
/* reload the firmware when a reboot starts and it is already loaded */
if (pageno == 0 && sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
return;
p = (struct l1_code *) sc->dsp_firm->data;
if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
uea_err(INS_TO_USBDEV(sc), "invalid DSP "
"page %u requested\n", pageno);
return;
}
if (pageno != 0) {
__uea_load_page_e4(sc, pageno, 0);
return;
}
uea_dbg(INS_TO_USBDEV(sc),
"sending Main DSP page %u\n", p->page_header[0].PageNumber);
for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) {
if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize))
__uea_load_page_e4(sc, i, 1);
}
uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n");
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = 0;
bi.bPageNumber = 0xff;
bi.wReserved = cpu_to_be16(UEA_RESERVED);
bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n");
}
static inline void wake_up_cmv_ack(struct uea_softc *sc)
{
BUG_ON(sc->cmv_ack);
sc->cmv_ack = 1;
wake_up(&sc->sync_q);
}
static inline int wait_cmv_ack(struct uea_softc *sc)
{
int ret = uea_wait(sc, sc->cmv_ack , ACK_TIMEOUT);
sc->cmv_ack = 0;
uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n",
jiffies_to_msecs(ret));
if (ret < 0)
return ret;
return (ret == 0) ? -ETIMEDOUT : 0;
}
#define UCDC_SEND_ENCAPSULATED_COMMAND 0x00
static int uea_request(struct uea_softc *sc,
u16 value, u16 index, u16 size, const void *data)
{
u8 *xfer_buff;
int ret = -ENOMEM;
xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
UCDC_SEND_ENCAPSULATED_COMMAND,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, xfer_buff, size, CTRL_TIMEOUT);
kfree(xfer_buff);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc), "usb_control_msg error %d\n", ret);
return ret;
}
if (ret != size) {
uea_err(INS_TO_USBDEV(sc),
"usb_control_msg send only %d bytes (instead of %d)\n",
ret, size);
return -EIO;
}
return 0;
}
static int uea_cmv_e1(struct uea_softc *sc,
u8 function, u32 address, u16 offset, u32 data)
{
struct cmv_e1 cmv;
int ret;
uea_enters(INS_TO_USBDEV(sc));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
"offset : 0x%04x, data : 0x%08x\n",
E1_FUNCTION_TYPE(function),
E1_FUNCTION_SUBTYPE(function),
E1_GETSA1(address), E1_GETSA2(address),
E1_GETSA3(address),
E1_GETSA4(address), offset, data);
/* we send a request, but we expect a reply */
sc->cmv_dsc.e1.function = function | 0x2;
sc->cmv_dsc.e1.idx++;
sc->cmv_dsc.e1.address = address;
sc->cmv_dsc.e1.offset = offset;
cmv.wPreamble = cpu_to_le16(E1_PREAMBLE);
cmv.bDirection = E1_HOSTTOMODEM;
cmv.bFunction = function;
cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
put_unaligned_le32(address, &cmv.dwSymbolicAddress);
cmv.wOffsetAddress = cpu_to_le16(offset);
put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START,
sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
static int uea_cmv_e4(struct uea_softc *sc,
u16 function, u16 group, u16 address, u16 offset, u32 data)
{
struct cmv_e4 cmv;
int ret;
uea_enters(INS_TO_USBDEV(sc));
memset(&cmv, 0, sizeof(cmv));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, "
"Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n",
E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function),
group, address, offset, data);
/* we send a request, but we expect a reply */
sc->cmv_dsc.e4.function = function | (0x1 << 4);
sc->cmv_dsc.e4.offset = offset;
sc->cmv_dsc.e4.address = address;
sc->cmv_dsc.e4.group = group;
cmv.wFunction = cpu_to_be16(function);
cmv.wGroup = cpu_to_be16(group);
cmv.wAddress = cpu_to_be16(address);
cmv.wOffset = cpu_to_be16(offset);
cmv.dwData[0] = cpu_to_be32(data);
ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START,
sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
static inline int uea_read_cmv_e1(struct uea_softc *sc,
u32 address, u16 offset, u32 *data)
{
int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD),
address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else
*data = sc->data;
return ret;
}
static inline int uea_read_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 *data)
{
int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
E4_REQUESTREAD, size),
group, address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else {
*data = sc->data;
/* size is in 16-bit word quantities */
if (size > 2)
*(data + 1) = sc->data1;
}
return ret;
}
static inline int uea_write_cmv_e1(struct uea_softc *sc,
u32 address, u16 offset, u32 data)
{
int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE),
address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"writing cmv failed with error %d\n", ret);
return ret;
}
static inline int uea_write_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 data)
{
int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
E4_REQUESTWRITE, size),
group, address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"writing cmv failed with error %d\n", ret);
return ret;
}
static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate)
{
int ret;
u16 timeout;
/* in bulk mode the modem has problems with high rates;
* changing the internal timing could improve things, but the
* value is mysterious.
* ADI930 doesn't support it (-EPIPE error).
*/
if (UEA_CHIP_VERSION(sc) == ADI930 ||
altsetting[sc->modem_index] > 0 ||
sc->stats.phy.dsrate == dsrate)
return;
/* Original timing (1Mbit/s) from ADI (used in the Windows driver) */
timeout = (dsrate <= 1024*1024) ? 0 : 1;
ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n",
timeout, ret < 0 ? " failed" : "");
}
/*
* Monitor the modem and update the stats.
* Return 0 if everything is OK.
* Return < 0 if an error occurs (-EAGAIN: reboot needed).
*/
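/*
* Added note (not part of the original source): GET_STATUS() of the E1
* state word maps to 0 = not yet synchronized, 1 = initialization,
* 2 = operational, 3 = synchronization failure, 4-6 = test modes,
* 7 = fast-retain; any other value is treated as invalid (see the switch
* below).
*/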
static int uea_stat_e1(struct uea_softc *sc)
{
u32 data;
int ret;
uea_enters(INS_TO_USBDEV(sc));
data = sc->stats.phy.state;
ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state);
if (ret < 0)
return ret;
switch (GET_STATUS(sc->stats.phy.state)) {
case 0: /* not yet synchronized */
uea_dbg(INS_TO_USBDEV(sc),
"modem not yet synchronized\n");
return 0;
case 1: /* initialization */
uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
return 0;
case 2: /* operational */
uea_vdbg(INS_TO_USBDEV(sc), "modem operational\n");
break;
case 3: /* fail ... */
uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
" (may be try other cmv/dsp)\n");
return -EAGAIN;
case 4 ... 6: /* test state */
uea_warn(INS_TO_USBDEV(sc),
"modem in test mode - not supported\n");
return -EAGAIN;
case 7: /* fast-retain ... */
uea_info(INS_TO_USBDEV(sc), "modem in fast-retain mode\n");
return 0;
default:
uea_err(INS_TO_USBDEV(sc), "modem invalid SW mode %d\n",
GET_STATUS(sc->stats.phy.state));
return -EAGAIN;
}
if (GET_STATUS(data) != 2) {
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "modem operational\n");
/* release the dsp firmware as it is not needed until
* the next failure
*/
if (sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
}
/* always update it, as the ATM layer might not have been initialized
* when we switched to the operational state
*/
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);
/* wake up processes waiting for synchronization */
wake_up(&sc->sync_q);
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags);
if (ret < 0)
return ret;
sc->stats.phy.mflags |= sc->stats.phy.flags;
/* in case of flags (for example delineation LOSS (& 0x10)),
* we check the status again in order to detect the failure earlier
*/
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
return 0;
}
ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data);
if (ret < 0)
return ret;
uea_set_bulk_timeout(sc, (data >> 16) * 32);
sc->stats.phy.dsrate = (data >> 16) * 32;
sc->stats.phy.usrate = (data & 0xffff) * 32;
UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsattenuation = (data & 0xff) / 2;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data);
if (ret < 0)
return ret;
sc->stats.phy.usattenuation = (data & 0xff) / 2;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, &sc->stats.phy.dsunc);
if (ret < 0)
return ret;
/* only for atu-c */
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr);
if (ret < 0)
return ret;
/* only for atu-c */
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe);
if (ret < 0)
return ret;
return 0;
}
static int uea_stat_e4(struct uea_softc *sc)
{
u32 data;
u32 tmp_arr[2];
int ret;
uea_enters(INS_TO_USBDEV(sc));
data = sc->stats.phy.state;
/* XXX only needs to be done before operational... */
ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state);
if (ret < 0)
return ret;
switch (sc->stats.phy.state) {
case 0x0: /* not yet synchronized */
case 0x1:
case 0x3:
case 0x4:
uea_dbg(INS_TO_USBDEV(sc), "modem not yet "
"synchronized\n");
return 0;
case 0x5: /* initialization */
case 0x6:
case 0x9:
case 0xa:
uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
return 0;
case 0x2: /* fail ... */
uea_info(INS_TO_USBDEV(sc), "modem synchronization "
"failed (may be try other cmv/dsp)\n");
return -EAGAIN;
case 0x7: /* operational */
break;
default:
uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n",
sc->stats.phy.state);
return 0;
}
if (data != 7) {
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "modem operational\n");
/* release the dsp firmware as it is not needed until
* the next failure
*/
if (sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
}
/* always update it, as the ATM layer might not have been initialized
* when we switched to the operational state
*/
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);
/* wake up processes waiting for synchronization */
wake_up(&sc->sync_q);
/* TODO improve this state machine:
* we need some CMV info: what they do and their units;
* we should find the equivalents of the eagle3 CMVs
*/
/* check flags */
ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags);
if (ret < 0)
return ret;
sc->stats.phy.mflags |= sc->stats.phy.flags;
/* in case of flags (for example delineation LOSS (& 0x10)),
* we check the status again in order to detect the failure earlier
*/
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
if (sc->stats.phy.flags & 1) /* delineation LOSS */
return -EAGAIN;
if (sc->stats.phy.flags & 0x4000) /* Reset Flag */
return -EAGAIN;
return 0;
}
/* rate data may be in upper or lower half of 64 bit word, strange */
ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr);
if (ret < 0)
return ret;
data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
sc->stats.phy.usrate = data / 1000;
ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr);
if (ret < 0)
return ret;
data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
uea_set_bulk_timeout(sc, data / 1000);
sc->stats.phy.dsrate = data / 1000;
UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsattenuation = data / 10;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data);
if (ret < 0)
return ret;
sc->stats.phy.usattenuation = data / 10;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsmargin = data / 2;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data);
if (ret < 0)
return ret;
sc->stats.phy.usmargin = data / 10;
return 0;
}
static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
{
char file_arr[] = "CMVxy.bin";
char *file;
kparam_block_sysfs_write(cmv_file);
/* set the proper name corresponding to the modem version and line type */
if (cmv_file[sc->modem_index] == NULL) {
if (UEA_CHIP_VERSION(sc) == ADI930)
file_arr[3] = '9';
else if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
file_arr[3] = '4';
else
file_arr[3] = 'e';
file_arr[4] = IS_ISDN(sc) ? 'i' : 'p';
file = file_arr;
} else
file = cmv_file[sc->modem_index];
strcpy(cmv_name, FW_DIR);
strlcat(cmv_name, file, UEA_FW_NAME_MAX);
if (ver == 2)
strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
kparam_unblock_sysfs_write(cmv_file);
}
static int request_cmvs_old(struct uea_softc *sc,
void **cmvs, const struct firmware **fw)
{
int ret, size;
u8 *data;
char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 1);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
cmv_name, ret);
return ret;
}
data = (u8 *) (*fw)->data;
size = (*fw)->size;
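/* v1 cmv file layout: a one-byte entry count followed by that many
 * struct uea_cmvs_v1 records */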
if (size < 1)
goto err_fw_corrupted;
if (size != *data * sizeof(struct uea_cmvs_v1) + 1)
goto err_fw_corrupted;
*cmvs = (void *)(data + 1);
return *data;
err_fw_corrupted:
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
release_firmware(*fw);
return -EILSEQ;
}
static int request_cmvs(struct uea_softc *sc,
void **cmvs, const struct firmware **fw, int *ver)
{
int ret, size;
u32 crc;
u8 *data;
char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 2);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
if (ret < 0) {
/* if caller can handle old version, try to provide it */
if (*ver == 1) {
uea_warn(INS_TO_USBDEV(sc), "requesting "
"firmware %s failed, "
"try to get older cmvs\n", cmv_name);
return request_cmvs_old(sc, cmvs, fw);
}
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
cmv_name, ret);
return ret;
}
size = (*fw)->size;
data = (u8 *) (*fw)->data;
if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
if (*ver == 1) {
uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted,"
" try to get older cmvs\n", cmv_name);
release_firmware(*fw);
return request_cmvs_old(sc, cmvs, fw);
}
goto err_fw_corrupted;
}
*ver = 2;
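/* v2 cmv file layout: "cmv2" magic, le32 CRC, a one-byte entry count,
 * then that many struct uea_cmvs_v2 records */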
data += 4;
size -= 4;
if (size < 5)
goto err_fw_corrupted;
crc = get_unaligned_le32(data);
data += 4;
size -= 4;
if (crc32_be(0, data, size) != crc)
goto err_fw_corrupted;
if (size != *data * sizeof(struct uea_cmvs_v2) + 1)
goto err_fw_corrupted;
*cmvs = (void *) (data + 1);
return *data;
err_fw_corrupted:
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
release_firmware(*fw);
return -EILSEQ;
}
static int uea_send_cmvs_e1(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 1; /* we can handle v1 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 1) {
struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr;
uea_warn(INS_TO_USBDEV(sc), "use deprecated cmvs version, "
"please update your firmware\n");
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v1[i].address),
get_unaligned_le16(&cmvs_v1[i].offset),
get_unaligned_le32(&cmvs_v1[i].data));
if (ret < 0)
goto out;
}
} else if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v2[i].address),
(u16) get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
static int uea_send_cmvs_e4(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 2; /* we can only handle v2 cmv firmware version; */
/* Enter in R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
/* XXX don't read the 3rd byte as it is always 6 */
ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e4(sc, 1,
get_unaligned_le32(&cmvs_v2[i].group),
get_unaligned_le32(&cmvs_v2[i].address),
get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter in R-ACT-REQ */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
/* Start booting the post-firmware modem:
* - send reset commands through usb control pipe
* - start workqueue for DSP loading
* - send CMV options to modem
*/
static int uea_start_reset(struct uea_softc *sc)
{
u16 zero = 0; /* ;-) */
int ret;
uea_enters(INS_TO_USBDEV(sc));
uea_info(INS_TO_USBDEV(sc), "(re)booting started\n");
/* mask interrupt */
sc->booting = 1;
/* We need to clear this here because an ACK timeout could have occurred,
* but the ACK may still arrive before we start the reboot and set this to 1.
* We would then fail to wait for the Ready CMV.
*/
sc->cmv_ack = 0;
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);
/* reset statistics */
memset(&sc->stats, 0, sizeof(struct uea_stats));
/* tell the modem that we want to boot in IDMA mode */
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL);
/* enter reset mode */
uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);
/* the original driver used 200ms, but the windows driver uses 100ms */
ret = uea_wait(sc, 0, msecs_to_jiffies(100));
if (ret < 0)
return ret;
/* leave reset mode */
uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);
if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
/* clear tx and rx mailboxes */
uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
}
ret = uea_wait(sc, 0, msecs_to_jiffies(1000));
if (ret < 0)
return ret;
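/* arm the CMV descriptor to expect the ADSL "modem ready" directive */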
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
E4_MODEMREADY, 1);
else
sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
E1_MODEMREADY);
/* demask interrupt */
sc->booting = 0;
/* start loading DSP */
sc->pageno = 0;
sc->ovl = 0;
schedule_work(&sc->task);
/* wait for modem ready CMV */
ret = wait_cmv_ack(sc);
if (ret < 0)
return ret;
uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n");
ret = sc->send_cmvs(sc);
if (ret < 0)
return ret;
sc->reset = 0;
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/*
* In case of an error, wait 1s before rebooting the modem,
* unless the modem itself requested the reboot (-EAGAIN).
* Monitor the modem every 1s.
*/
static int uea_kthread(void *data)
{
struct uea_softc *sc = data;
int ret = -EAGAIN;
set_freezable();
uea_enters(INS_TO_USBDEV(sc));
while (!kthread_should_stop()) {
if (ret < 0 || sc->reset)
ret = uea_start_reset(sc);
if (!ret)
ret = sc->stat(sc);
if (ret != -EAGAIN)
uea_wait(sc, 0, msecs_to_jiffies(1000));
try_to_freeze();
}
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/* Load the second USB firmware for the ADI930 chip */
static int load_XILINX_firmware(struct uea_softc *sc)
{
const struct firmware *fw_entry;
int ret, size, u, ln;
const u8 *pfw;
u8 value;
char *fw_name = FW_DIR "930-fpga.bin";
uea_enters(INS_TO_USBDEV(sc));
ret = request_firmware(&fw_entry, fw_name, &sc->usb_dev->dev);
if (ret) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is not available\n",
fw_name);
goto err0;
}
pfw = fw_entry->data;
size = fw_entry->size;
if (size != 0x577B) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
fw_name);
ret = -EILSEQ;
goto err1;
}
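/* send the FPGA bitstream to the device in 64-byte chunks */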
for (u = 0; u < size; u += ln) {
ln = min(size - u, 64);
ret = uea_request(sc, 0xe, 0, ln, pfw + u);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"elsa download data failed (%d)\n", ret);
goto err1;
}
}
/* finish to send the fpga */
ret = uea_request(sc, 0xe, 1, 0, NULL);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"elsa download data failed (%d)\n", ret);
goto err1;
}
/* Tell the modem we are finished: de-assert reset */
value = 0;
ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
if (ret < 0)
uea_err(sc->usb_dev, "elsa de-assert failed with error"
" %d\n", ret);
err1:
release_firmware(fw_entry);
err0:
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/* The modem has sent us an ack. First check that it is valid. */
static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
{
struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;
uea_enters(INS_TO_USBDEV(sc));
if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
goto bad1;
if (cmv->bDirection != E1_MODEMTOHOST)
goto bad1;
/* FIXME: the ADI930 replies with a wrong preamble (func = 2, sub = 2)
* to the first MEMACCESS cmv. Ignore it...
*/
if (cmv->bFunction != dsc->function) {
if (UEA_CHIP_VERSION(sc) == ADI930
&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
cmv->wIndex = cpu_to_le16(dsc->idx);
put_unaligned_le32(dsc->address,
&cmv->dwSymbolicAddress);
cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
} else
goto bad2;
}
if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
E1_MODEMREADY)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
}
/* in case of MEMACCESS */
if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
goto bad2;
sc->data = get_unaligned_le32(&cmv->dwData);
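/* swap the two 16-bit halves of the CMV data word */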
sc->data = sc->data << 16 | sc->data >> 16;
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
"Function : %d, Subfunction : %d\n",
E1_FUNCTION_TYPE(cmv->bFunction),
E1_FUNCTION_SUBTYPE(cmv->bFunction));
uea_leaves(INS_TO_USBDEV(sc));
return;
bad1:
uea_err(INS_TO_USBDEV(sc), "invalid cmv received, "
"wPreamble %d, bDirection %d\n",
le16_to_cpu(cmv->wPreamble), cmv->bDirection);
uea_leaves(INS_TO_USBDEV(sc));
}
/* The modem has sent us an ack. First check that it is valid. */
static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
{
struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;
uea_enters(INS_TO_USBDEV(sc));
uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));
if (be16_to_cpu(cmv->wFunction) != dsc->function)
goto bad2;
if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
E4_MODEMREADY, 1)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
}
/* in case of MEMACCESS */
if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
be16_to_cpu(cmv->wGroup) != dsc->group ||
be16_to_cpu(cmv->wAddress) != dsc->address)
goto bad2;
sc->data = be32_to_cpu(cmv->dwData[0]);
sc->data1 = be32_to_cpu(cmv->dwData[1]);
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
"Function : %d, Subfunction : %d\n",
E4_FUNCTION_TYPE(cmv->wFunction),
E4_FUNCTION_SUBTYPE(cmv->wFunction));
uea_leaves(INS_TO_USBDEV(sc));
return;
}
static void uea_schedule_load_page_e1(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
schedule_work(&sc->task);
}
static void uea_schedule_load_page_e4(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e4_bSwapPageNo;
schedule_work(&sc->task);
}
/*
* interrupt handler
*/
static void uea_intr(struct urb *urb)
{
struct uea_softc *sc = urb->context;
struct intr_pkt *intr = urb->transfer_buffer;
int status = urb->status;
uea_enters(INS_TO_USBDEV(sc));
if (unlikely(status < 0)) {
uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
status);
return;
}
/* device-to-host interrupt */
if (intr->bType != 0x08 || sc->booting) {
uea_err(INS_TO_USBDEV(sc), "wrong interrupt\n");
goto resubmit;
}
switch (le16_to_cpu(intr->wInterrupt)) {
case INT_LOADSWAPPAGE:
sc->schedule_load_page(sc, intr);
break;
case INT_INCOMINGCMV:
sc->dispatch_cmv(sc, intr);
break;
default:
uea_err(INS_TO_USBDEV(sc), "unknown interrupt %u\n",
le16_to_cpu(intr->wInterrupt));
}
resubmit:
usb_submit_urb(sc->urb_int, GFP_ATOMIC);
}
/*
* Start the modem : init the data and start kernel thread
*/
static int uea_boot(struct uea_softc *sc)
{
int ret, size;
struct intr_pkt *intr;
uea_enters(INS_TO_USBDEV(sc));
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
size = E4_INTR_PKT_SIZE;
sc->dispatch_cmv = uea_dispatch_cmv_e4;
sc->schedule_load_page = uea_schedule_load_page_e4;
sc->stat = uea_stat_e4;
sc->send_cmvs = uea_send_cmvs_e4;
INIT_WORK(&sc->task, uea_load_page_e4);
} else {
size = E1_INTR_PKT_SIZE;
sc->dispatch_cmv = uea_dispatch_cmv_e1;
sc->schedule_load_page = uea_schedule_load_page_e1;
sc->stat = uea_stat_e1;
sc->send_cmvs = uea_send_cmvs_e1;
INIT_WORK(&sc->task, uea_load_page_e1);
}
init_waitqueue_head(&sc->sync_q);
if (UEA_CHIP_VERSION(sc) == ADI930)
load_XILINX_firmware(sc);
intr = kmalloc(size, GFP_KERNEL);
if (!intr) {
uea_err(INS_TO_USBDEV(sc),
"cannot allocate interrupt package\n");
goto err0;
}
sc->urb_int = usb_alloc_urb(0, GFP_KERNEL);
if (!sc->urb_int) {
uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n");
goto err1;
}
usb_fill_int_urb(sc->urb_int, sc->usb_dev,
usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
intr, size, uea_intr, sc,
sc->usb_dev->actconfig->interface[0]->altsetting[0].
endpoint[0].desc.bInterval);
ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"urb submition failed with error %d\n", ret);
goto err1;
}
/* Create worker thread, but don't start it here. Start it after
* all usbatm generic initialization is done.
*/
sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
if (IS_ERR(sc->kthread)) {
uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
goto err2;
}
uea_leaves(INS_TO_USBDEV(sc));
return 0;
err2:
usb_kill_urb(sc->urb_int);
err1:
usb_free_urb(sc->urb_int);
sc->urb_int = NULL;
kfree(intr);
err0:
uea_leaves(INS_TO_USBDEV(sc));
return -ENOMEM;
}
/*
* Stop the modem : kill kernel thread and free data
*/
static void uea_stop(struct uea_softc *sc)
{
int ret;
uea_enters(INS_TO_USBDEV(sc));
ret = kthread_stop(sc->kthread);
uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret);
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
usb_kill_urb(sc->urb_int);
kfree(sc->urb_int->transfer_buffer);
usb_free_urb(sc->urb_int);
/* flush the work item now that no one can schedule it anymore */
flush_work_sync(&sc->task);
if (sc->dsp_firm)
release_firmware(sc->dsp_firm);
uea_leaves(INS_TO_USBDEV(sc));
}
/* sysfs interface */
static struct uea_softc *dev_to_uea(struct device *dev)
{
struct usb_interface *intf;
struct usbatm_data *usbatm;
intf = to_usb_interface(dev);
if (!intf)
return NULL;
usbatm = usb_get_intfdata(intf);
if (!usbatm)
return NULL;
return usbatm->driver_data;
}
static ssize_t read_status(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state);
out:
mutex_unlock(&uea_mutex);
return ret;
}
static ssize_t reboot(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = -ENODEV;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
sc->reset = 1;
ret = count;
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
static ssize_t read_human_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret = -ENODEV;
int modem_state;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
switch (sc->stats.phy.state) {
case 0x0: /* not yet synchronized */
case 0x1:
case 0x3:
case 0x4:
modem_state = 0;
break;
case 0x5: /* initialization */
case 0x6:
case 0x9:
case 0xa:
modem_state = 1;
break;
case 0x7: /* operational */
modem_state = 2;
break;
case 0x2: /* fail ... */
modem_state = 3;
break;
default: /* unknown */
modem_state = 4;
break;
}
} else
modem_state = GET_STATUS(sc->stats.phy.state);
switch (modem_state) {
case 0:
ret = sprintf(buf, "Modem is booting\n");
break;
case 1:
ret = sprintf(buf, "Modem is initializing\n");
break;
case 2:
ret = sprintf(buf, "Modem is operational\n");
break;
case 3:
ret = sprintf(buf, "Modem synchronization failed\n");
break;
default:
ret = sprintf(buf, "Modem state is unknown\n");
break;
}
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
struct uea_softc *sc;
char *delin = "GOOD";
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
if (sc->stats.phy.flags & 0x4000)
delin = "RESET";
else if (sc->stats.phy.flags & 0x0001)
delin = "LOSS";
} else {
if (sc->stats.phy.flags & 0x0C00)
delin = "ERROR";
else if (sc->stats.phy.flags & 0x0030)
delin = "LOSS";
}
ret = sprintf(buf, "%s\n", delin);
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
#define UEA_ATTR(name, reset) \
\
static ssize_t read_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
int ret = -ENODEV; \
struct uea_softc *sc; \
\
mutex_lock(&uea_mutex); \
sc = dev_to_uea(dev); \
if (!sc) \
goto out; \
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \
if (reset) \
sc->stats.phy.name = 0; \
out: \
mutex_unlock(&uea_mutex); \
return ret; \
} \
\
static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL)
UEA_ATTR(mflags, 1);
UEA_ATTR(vidcpe, 0);
UEA_ATTR(usrate, 0);
UEA_ATTR(dsrate, 0);
UEA_ATTR(usattenuation, 0);
UEA_ATTR(dsattenuation, 0);
UEA_ATTR(usmargin, 0);
UEA_ATTR(dsmargin, 0);
UEA_ATTR(txflow, 0);
UEA_ATTR(rxflow, 0);
UEA_ATTR(uscorr, 0);
UEA_ATTR(dscorr, 0);
UEA_ATTR(usunc, 0);
UEA_ATTR(dsunc, 0);
UEA_ATTR(firmid, 0);
/* Retrieve the device End System Identifier (MAC) */
static int uea_getesi(struct uea_softc *sc, u_char * esi)
{
unsigned char mac_str[2 * ETH_ALEN + 1];
int i;
if (usb_string
(sc->usb_dev, sc->usb_dev->descriptor.iSerialNumber, mac_str,
sizeof(mac_str)) != 2 * ETH_ALEN)
return 1;
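/* convert the hex serial-number string into the 6-byte ESI */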
for (i = 0; i < ETH_ALEN; i++)
esi[i] = hex_to_bin(mac_str[2 * i]) * 16 +
hex_to_bin(mac_str[2 * i + 1]);
return 0;
}
/* ATM stuff */
static int uea_atm_open(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
struct uea_softc *sc = usbatm->driver_data;
return uea_getesi(sc, atm_dev->esi);
}
static int uea_heavy(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct uea_softc *sc = usbatm->driver_data;
wait_event_interruptible(sc->sync_q, IS_OPERATIONAL(sc));
return 0;
}
static int claim_interface(struct usb_device *usb_dev,
struct usbatm_data *usbatm, int ifnum)
{
int ret;
struct usb_interface *intf = usb_ifnum_to_if(usb_dev, ifnum);
if (!intf) {
uea_err(usb_dev, "interface %d not found\n", ifnum);
return -ENODEV;
}
ret = usb_driver_claim_interface(&uea_driver, intf, usbatm);
if (ret != 0)
uea_err(usb_dev, "can't claim interface %d, error %d\n", ifnum,
ret);
return ret;
}
static struct attribute *attrs[] = {
&dev_attr_stat_status.attr,
&dev_attr_stat_mflags.attr,
&dev_attr_stat_human_status.attr,
&dev_attr_stat_delin.attr,
&dev_attr_stat_vidcpe.attr,
&dev_attr_stat_usrate.attr,
&dev_attr_stat_dsrate.attr,
&dev_attr_stat_usattenuation.attr,
&dev_attr_stat_dsattenuation.attr,
&dev_attr_stat_usmargin.attr,
&dev_attr_stat_dsmargin.attr,
&dev_attr_stat_txflow.attr,
&dev_attr_stat_rxflow.attr,
&dev_attr_stat_uscorr.attr,
&dev_attr_stat_dscorr.attr,
&dev_attr_stat_usunc.attr,
&dev_attr_stat_dsunc.attr,
&dev_attr_stat_firmid.attr,
NULL,
};
static struct attribute_group attr_grp = {
.attrs = attrs,
};
static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct uea_softc *sc;
int ret, ifnum = intf->altsetting->desc.bInterfaceNumber;
unsigned int alt;
uea_enters(usb);
/* interface 0 is for firmware/monitoring */
if (ifnum != UEA_INTR_IFACE_NO)
return -ENODEV;
usbatm->flags = (sync_wait[modem_index] ? 0 : UDSL_SKIP_HEAVY_INIT);
/* interface 1 is for outbound traffic */
ret = claim_interface(usb, usbatm, UEA_US_IFACE_NO);
if (ret < 0)
return ret;
/* ADI930 has only 2 interfaces and inbound traffic is on interface 1 */
if (UEA_CHIP_VERSION(id) != ADI930) {
/* interface 2 is for inbound traffic */
ret = claim_interface(usb, usbatm, UEA_DS_IFACE_NO);
if (ret < 0)
return ret;
}
sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL);
if (!sc) {
uea_err(usb, "uea_init: not enough memory !\n");
return -ENOMEM;
}
sc->usb_dev = usb;
usbatm->driver_data = sc;
sc->usbatm = usbatm;
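/* assign the next free per-modem index, falling back to 0 once NB_MODEM is reached */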
sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0;
sc->driver_info = id->driver_info;
/* first try to use module parameter */
if (annex[sc->modem_index] == 1)
sc->annex = ANNEXA;
else if (annex[sc->modem_index] == 2)
sc->annex = ANNEXB;
/* try to autodetect annex */
else if (sc->driver_info & AUTO_ANNEX_A)
sc->annex = ANNEXA;
else if (sc->driver_info & AUTO_ANNEX_B)
sc->annex = ANNEXB;
else
sc->annex = (le16_to_cpu
(sc->usb_dev->descriptor.bcdDevice) & 0x80) ? ANNEXB : ANNEXA;
alt = altsetting[sc->modem_index];
/* ADI930 doesn't support iso */
if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
if (alt <= 8 &&
usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
uea_info(usb, "using iso mode\n");
usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
} else {
uea_err(usb, "setting alternate %u failed for "
"2 interface, using bulk mode\n", alt);
}
}
ret = sysfs_create_group(&intf->dev.kobj, &attr_grp);
if (ret < 0)
goto error;
ret = uea_boot(sc);
if (ret < 0)
goto error_rm_grp;
return 0;
error_rm_grp:
sysfs_remove_group(&intf->dev.kobj, &attr_grp);
error:
kfree(sc);
return ret;
}
static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct uea_softc *sc = usbatm->driver_data;
sysfs_remove_group(&intf->dev.kobj, &attr_grp);
uea_stop(sc);
kfree(sc);
}
static struct usbatm_driver uea_usbatm_driver = {
.driver_name = "ueagle-atm",
.bind = uea_bind,
.atm_start = uea_atm_open,
.unbind = uea_unbind,
.heavy_init = uea_heavy,
.bulk_in = UEA_BULK_DATA_PIPE,
.bulk_out = UEA_BULK_DATA_PIPE,
.isoc_in = UEA_ISO_DATA_PIPE,
};
static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *usb = interface_to_usbdev(intf);
int ret;
uea_enters(usb);
uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n",
le16_to_cpu(usb->descriptor.idVendor),
le16_to_cpu(usb->descriptor.idProduct),
le16_to_cpu(usb->descriptor.bcdDevice),
chip_name[UEA_CHIP_VERSION(id)]);
usb_reset_device(usb);
if (UEA_IS_PREFIRM(id))
return uea_load_firmware(usb, UEA_CHIP_VERSION(id));
ret = usbatm_usb_probe(intf, id, &uea_usbatm_driver);
if (ret == 0) {
struct usbatm_data *usbatm = usb_get_intfdata(intf);
struct uea_softc *sc = usbatm->driver_data;
/* Ensure carrier is initialized to off as early as possible */
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);
/* Only start the worker thread when all init is done */
wake_up_process(sc->kthread);
}
return ret;
}
static void uea_disconnect(struct usb_interface *intf)
{
struct usb_device *usb = interface_to_usbdev(intf);
int ifnum = intf->altsetting->desc.bInterfaceNumber;
uea_enters(usb);
/* The ADI930 has 2 interfaces and the Eagle has 3 interfaces.
* A pre-firmware device has only one interface.
*/
if (usb->config->desc.bNumInterfaces != 1 && ifnum == 0) {
mutex_lock(&uea_mutex);
usbatm_usb_disconnect(intf);
mutex_unlock(&uea_mutex);
uea_info(usb, "ADSL device removed\n");
}
uea_leaves(usb);
}
/*
* List of supported VID/PID
*/
static const struct usb_device_id uea_ids[] = {
{USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM),
.driver_info = ADI930 | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM),
.driver_info = EAGLE_III | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM),
.driver_info = EAGLE_III | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM),
.driver_info = EAGLE_IV | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM),
.driver_info = EAGLE_IV | PSTFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM),
.driver_info = ADI930 | PSTFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM),
.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM),
.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{}
};
/*
* USB driver descriptor
*/
static struct usb_driver uea_driver = {
.name = "ueagle-atm",
.id_table = uea_ids,
.probe = uea_probe,
.disconnect = uea_disconnect,
};
MODULE_DEVICE_TABLE(usb, uea_ids);
module_usb_driver(uea_driver);
MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
ardX/android_kernel_k-touch_msm8x25q | drivers/video/sh_mipi_dsi.c | 4860 | 15400 | /*
* Renesas SH-mobile MIPI DSI support
*
* Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/module.h>
#include <video/mipi_display.h>
#include <video/sh_mipi_dsi.h>
#include <video/sh_mobile_lcdc.h>
#include "sh_mobile_lcdcfb.h"
#define SYSCTRL 0x0000
#define SYSCONF 0x0004
#define TIMSET 0x0008
#define RESREQSET0 0x0018
#define RESREQSET1 0x001c
#define HSTTOVSET 0x0020
#define LPRTOVSET 0x0024
#define TATOVSET 0x0028
#define PRTOVSET 0x002c
#define DSICTRL 0x0030
#define DSIINTE 0x0060
#define PHYCTRL 0x0070
/* relative to linkbase */
#define DTCTR 0x0000
#define VMCTR1 0x0020
#define VMCTR2 0x0024
#define VMLEN1 0x0028
#define VMLEN2 0x002c
#define CMTSRTREQ 0x0070
#define CMTSRTCTR 0x00d0
/* E.g., sh7372 has 2 MIPI-DSIs - one for each LCDC */
#define MAX_SH_MIPI_DSI 2
struct sh_mipi {
struct sh_mobile_lcdc_entity entity;
void __iomem *base;
void __iomem *linkbase;
struct clk *dsit_clk;
struct platform_device *pdev;
};
#define to_sh_mipi(e) container_of(e, struct sh_mipi, entity)
static struct sh_mipi *mipi_dsi[MAX_SH_MIPI_DSI];
/* Protect the above array */
static DEFINE_MUTEX(array_lock);
static struct sh_mipi *sh_mipi_by_handle(int handle)
{
if (handle >= ARRAY_SIZE(mipi_dsi) || handle < 0)
return NULL;
return mipi_dsi[handle];
}
static int sh_mipi_send_short(struct sh_mipi *mipi, u8 dsi_cmd,
u8 cmd, u8 param)
{
u32 data = (dsi_cmd << 24) | (cmd << 16) | (param << 8);
int cnt = 100;
/* transmit a short packet to LCD panel */
iowrite32(1 | data, mipi->linkbase + CMTSRTCTR);
iowrite32(1, mipi->linkbase + CMTSRTREQ);
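/* poll for up to ~100us for the transmit request bit to clear */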
while ((ioread32(mipi->linkbase + CMTSRTREQ) & 1) && --cnt)
udelay(1);
return cnt ? 0 : -ETIMEDOUT;
}
#define LCD_CHAN2MIPI(c) ((c) < LCDC_CHAN_MAINLCD || (c) > LCDC_CHAN_SUBLCD ? \
-EINVAL : (c) - 1)
static int sh_mipi_dcs(int handle, u8 cmd)
{
struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
if (!mipi)
return -ENODEV;
return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE, cmd, 0);
}
static int sh_mipi_dcs_param(int handle, u8 cmd, u8 param)
{
struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
if (!mipi)
return -ENODEV;
return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE_PARAM, cmd,
param);
}
static void sh_mipi_dsi_enable(struct sh_mipi *mipi, bool enable)
{
/*
* enable LCDC data tx, transition to LPS after completion of each HS
* packet
*/
iowrite32(0x00000002 | enable, mipi->linkbase + DTCTR);
}
static void sh_mipi_shutdown(struct platform_device *pdev)
{
struct sh_mipi *mipi = to_sh_mipi(platform_get_drvdata(pdev));
sh_mipi_dsi_enable(mipi, false);
}
static int __init sh_mipi_setup(struct sh_mipi *mipi,
struct sh_mipi_dsi_info *pdata)
{
void __iomem *base = mipi->base;
struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
u32 pctype, datatype, pixfmt, linelength, vmctr2;
u32 tmp, top, bottom, delay, div;
bool yuv;
int bpp;
/*
* Select data format. MIPI DSI is not hot-pluggable, so, we just use
* the default videomode. If this ever becomes a problem, We'll have to
* move this to mipi_display_on() above and use info->var.xres
*/
switch (pdata->data_format) {
case MIPI_RGB888:
pctype = 0;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
linelength = ch->lcd_modes[0].xres * 3;
yuv = false;
break;
case MIPI_RGB565:
pctype = 1;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
linelength = ch->lcd_modes[0].xres * 2;
yuv = false;
break;
case MIPI_RGB666_LP:
pctype = 2;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
linelength = ch->lcd_modes[0].xres * 3;
yuv = false;
break;
case MIPI_RGB666:
pctype = 3;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
yuv = false;
break;
case MIPI_BGR888:
pctype = 8;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
linelength = ch->lcd_modes[0].xres * 3;
yuv = false;
break;
case MIPI_BGR565:
pctype = 9;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
linelength = ch->lcd_modes[0].xres * 2;
yuv = false;
break;
case MIPI_BGR666_LP:
pctype = 0xa;
datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
linelength = ch->lcd_modes[0].xres * 3;
yuv = false;
break;
case MIPI_BGR666:
pctype = 0xb;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
linelength = (ch->lcd_modes[0].xres * 18 + 7) / 8;
yuv = false;
break;
case MIPI_YUYV:
pctype = 4;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
linelength = ch->lcd_modes[0].xres * 2;
yuv = true;
break;
case MIPI_UYVY:
pctype = 5;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
linelength = ch->lcd_modes[0].xres * 2;
yuv = true;
break;
case MIPI_YUV420_L:
pctype = 6;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
linelength = (ch->lcd_modes[0].xres * 12 + 7) / 8;
yuv = true;
break;
case MIPI_YUV420:
pctype = 7;
datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
/* Length of U/V line */
linelength = (ch->lcd_modes[0].xres + 1) / 2;
yuv = true;
break;
default:
return -EINVAL;
}
if ((yuv && ch->interface_type != YUV422) ||
(!yuv && ch->interface_type != RGB24))
return -EINVAL;
if (!pdata->lane)
return -EINVAL;
/* reset DSI link */
iowrite32(0x00000001, base + SYSCTRL);
/* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
udelay(50);
iowrite32(0x00000000, base + SYSCTRL);
/* setup DSI link */
/*
* T_wakeup = 0x7000
* T_hs-trail = 3
* T_hs-prepare = 3
* T_clk-trail = 3
* T_clk-prepare = 2
*/
iowrite32(0x70003332, base + TIMSET);
/* no responses requested */
iowrite32(0x00000000, base + RESREQSET0);
/* request response to packets of type 0x28 */
iowrite32(0x00000100, base + RESREQSET1);
/* High-speed transmission timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + HSTTOVSET);
/* LP reception timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + LPRTOVSET);
/* Turn-around timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + TATOVSET);
/* Peripheral reset timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + PRTOVSET);
/* Interrupts not used, disable all */
iowrite32(0, base + DSIINTE);
/* DSI-Tx bias on */
iowrite32(0x00000001, base + PHYCTRL);
udelay(200);
/* Deassert resets, power on */
iowrite32(0x03070001 | pdata->phyctrl, base + PHYCTRL);
/*
* Default = ULPS enable |
* Contention detection enabled |
* EoT packet transmission enable |
* CRC check enable |
* ECC check enable
*/
bitmap_fill((unsigned long *)&tmp, pdata->lane);
tmp |= 0x00003700;
iowrite32(tmp, base + SYSCONF);
/* setup l-bridge */
/*
* Enable transmission of all packets,
* transmit LPS after each HS packet completion
*/
iowrite32(0x00000006, mipi->linkbase + DTCTR);
/* VSYNC width = 2 (<< 17) */
iowrite32((ch->lcd_modes[0].vsync_len << pdata->vsynw_offset) |
(pdata->clksrc << 16) | (pctype << 12) | datatype,
mipi->linkbase + VMCTR1);
/*
* Non-burst mode with sync pulses: VSE and HSE are output,
* HSA period allowed, no commands in LP
*/
vmctr2 = 0;
if (pdata->flags & SH_MIPI_DSI_VSEE)
vmctr2 |= 1 << 23;
if (pdata->flags & SH_MIPI_DSI_HSEE)
vmctr2 |= 1 << 22;
if (pdata->flags & SH_MIPI_DSI_HSAE)
vmctr2 |= 1 << 21;
if (pdata->flags & SH_MIPI_DSI_BL2E)
vmctr2 |= 1 << 17;
if (pdata->flags & SH_MIPI_DSI_HSABM)
vmctr2 |= 1 << 5;
if (pdata->flags & SH_MIPI_DSI_HBPBM)
vmctr2 |= 1 << 4;
if (pdata->flags & SH_MIPI_DSI_HFPBM)
vmctr2 |= 1 << 3;
iowrite32(vmctr2, mipi->linkbase + VMCTR2);
/*
* VMLEN1 = RGBLEN | HSALEN
*
* see
* Video mode - Blanking Packet setting
*/
top = linelength << 16; /* RGBLEN */
bottom = 0x00000001;
if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
bottom = (pdata->lane * ch->lcd_modes[0].hsync_len) - 10;
iowrite32(top | bottom , mipi->linkbase + VMLEN1);
/*
* VMLEN2 = HBPLEN | HFPLEN
*
* see
* Video mode - Blanking Packet setting
*/
top = 0x00010000;
bottom = 0x00000001;
delay = 0;
div = 1; /* HSbyteCLK is calculation base
* HS4divCLK = HSbyteCLK/2
* HS6divCLK is not supported for now */
if (pdata->flags & SH_MIPI_DSI_HS4divCLK)
div = 2;
if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
top = ch->lcd_modes[0].hsync_len + ch->lcd_modes[0].left_margin;
top = ((pdata->lane * top / div) - 10) << 16;
}
if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
bottom = ch->lcd_modes[0].right_margin;
bottom = (pdata->lane * bottom / div) - 12;
}
bpp = linelength / ch->lcd_modes[0].xres; /* byte / pixel */
if ((pdata->lane / div) > bpp) {
tmp = ch->lcd_modes[0].xres / bpp; /* output cycle */
tmp = ch->lcd_modes[0].xres - tmp; /* (input - output) cycle */
delay = (pdata->lane * tmp);
}
iowrite32(top | (bottom + delay) , mipi->linkbase + VMLEN2);
msleep(5);
/* setup LCD panel */
/* cf. drivers/video/omap/lcd_mipid.c */
sh_mipi_dcs(ch->chan, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
/*
* [7] - Page Address Mode
* [6] - Column Address Mode
* [5] - Page / Column Address Mode
* [4] - Display Device Line Refresh Order
* [3] - RGB/BGR Order
* [2] - Display Data Latch Data Order
* [1] - Flip Horizontal
* [0] - Flip Vertical
*/
sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
/* cf. set_data_lines() */
sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_PIXEL_FORMAT,
pixfmt << 4);
sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
/* Enable timeout counters */
iowrite32(0x00000f00, base + DSICTRL);
return 0;
}
static int mipi_display_on(struct sh_mobile_lcdc_entity *entity)
{
struct sh_mipi *mipi = to_sh_mipi(entity);
struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
int ret;
pm_runtime_get_sync(&mipi->pdev->dev);
ret = pdata->set_dot_clock(mipi->pdev, mipi->base, 1);
if (ret < 0)
goto mipi_display_on_fail1;
ret = sh_mipi_setup(mipi, pdata);
if (ret < 0)
goto mipi_display_on_fail2;
sh_mipi_dsi_enable(mipi, true);
return SH_MOBILE_LCDC_DISPLAY_CONNECTED;
mipi_display_on_fail1:
pm_runtime_put_sync(&mipi->pdev->dev);
mipi_display_on_fail2:
pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
return ret;
}
static void mipi_display_off(struct sh_mobile_lcdc_entity *entity)
{
struct sh_mipi *mipi = to_sh_mipi(entity);
struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
sh_mipi_dsi_enable(mipi, false);
pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
pm_runtime_put_sync(&mipi->pdev->dev);
}
static const struct sh_mobile_lcdc_entity_ops mipi_ops = {
.display_on = mipi_display_on,
.display_off = mipi_display_off,
};
static int __init sh_mipi_probe(struct platform_device *pdev)
{
struct sh_mipi *mipi;
struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
unsigned long rate, f_current;
int idx = pdev->id, ret;
if (!res || !res2 || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
return -ENODEV;
if (!pdata->set_dot_clock)
return -EINVAL;
mutex_lock(&array_lock);
if (idx < 0)
for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
;
if (idx == ARRAY_SIZE(mipi_dsi)) {
ret = -EBUSY;
goto efindslot;
}
mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi) {
ret = -ENOMEM;
goto ealloc;
}
mipi->entity.owner = THIS_MODULE;
mipi->entity.ops = &mipi_ops;
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "MIPI register region already claimed\n");
ret = -EBUSY;
goto ereqreg;
}
mipi->base = ioremap(res->start, resource_size(res));
if (!mipi->base) {
ret = -ENOMEM;
goto emap;
}
if (!request_mem_region(res2->start, resource_size(res2), pdev->name)) {
dev_err(&pdev->dev, "MIPI register region 2 already claimed\n");
ret = -EBUSY;
goto ereqreg2;
}
mipi->linkbase = ioremap(res2->start, resource_size(res2));
if (!mipi->linkbase) {
ret = -ENOMEM;
goto emap2;
}
mipi->pdev = pdev;
mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
if (IS_ERR(mipi->dsit_clk)) {
ret = PTR_ERR(mipi->dsit_clk);
goto eclktget;
}
f_current = clk_get_rate(mipi->dsit_clk);
/* 80MHz required by the datasheet */
rate = clk_round_rate(mipi->dsit_clk, 80000000);
if (rate > 0 && rate != f_current)
ret = clk_set_rate(mipi->dsit_clk, rate);
else
ret = rate;
if (ret < 0)
goto esettrate;
dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
ret = clk_enable(mipi->dsit_clk);
if (ret < 0)
goto eclkton;
mipi_dsi[idx] = mipi;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
mutex_unlock(&array_lock);
platform_set_drvdata(pdev, &mipi->entity);
return 0;
eclkton:
esettrate:
clk_put(mipi->dsit_clk);
eclktget:
iounmap(mipi->linkbase);
emap2:
release_mem_region(res2->start, resource_size(res2));
ereqreg2:
iounmap(mipi->base);
emap:
release_mem_region(res->start, resource_size(res));
ereqreg:
kfree(mipi);
ealloc:
efindslot:
mutex_unlock(&array_lock);
return ret;
}
static int __exit sh_mipi_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
struct sh_mipi *mipi = to_sh_mipi(platform_get_drvdata(pdev));
int i, ret;
mutex_lock(&array_lock);
for (i = 0; i < ARRAY_SIZE(mipi_dsi) && mipi_dsi[i] != mipi; i++)
;
if (i == ARRAY_SIZE(mipi_dsi)) {
ret = -EINVAL;
} else {
ret = 0;
mipi_dsi[i] = NULL;
}
mutex_unlock(&array_lock);
if (ret < 0)
return ret;
pm_runtime_disable(&pdev->dev);
clk_disable(mipi->dsit_clk);
clk_put(mipi->dsit_clk);
iounmap(mipi->linkbase);
if (res2)
release_mem_region(res2->start, resource_size(res2));
iounmap(mipi->base);
if (res)
release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
kfree(mipi);
return 0;
}
static struct platform_driver sh_mipi_driver = {
.remove = __exit_p(sh_mipi_remove),
.shutdown = sh_mipi_shutdown,
.driver = {
.name = "sh-mipi-dsi",
},
};
static int __init sh_mipi_init(void)
{
return platform_driver_probe(&sh_mipi_driver, sh_mipi_probe);
}
module_init(sh_mipi_init);
static void __exit sh_mipi_exit(void)
{
platform_driver_unregister(&sh_mipi_driver);
}
module_exit(sh_mipi_exit);
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_DESCRIPTION("SuperH / ARM-shmobile MIPI DSI driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jamison904/Galaxy_Note_3 | drivers/net/ethernet/faraday/ftgmac100.c | 4860 | 35713 | /*
* Faraday FTGMAC100 Gigabit Ethernet
*
* (C) Copyright 2009-2011 Faraday Technology
* Po-Yu Chuang <ratbert@faraday-tech.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include "ftgmac100.h"
#define DRV_NAME "ftgmac100"
#define DRV_VERSION "0.7"
#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
#define MAX_PKT_SIZE 1518
#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */
/******************************************************************************
* private data
*****************************************************************************/
struct ftgmac100_descs {
struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
};
struct ftgmac100 {
struct resource *res;
void __iomem *base;
int irq;
struct ftgmac100_descs *descs;
dma_addr_t descs_dma_addr;
unsigned int rx_pointer;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
unsigned int tx_pending;
spinlock_t tx_lock;
struct net_device *netdev;
struct device *dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
int phy_irq[PHY_MAX_ADDR];
struct phy_device *phydev;
int old_speed;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
struct ftgmac100_rxdes *rxdes, gfp_t gfp);
/******************************************************************************
* internal functions (hardware register access)
*****************************************************************************/
#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \
FTGMAC100_INT_XPKT_ETH | \
FTGMAC100_INT_XPKT_LOST | \
FTGMAC100_INT_AHB_ERR | \
FTGMAC100_INT_PHYSTS_CHG | \
FTGMAC100_INT_RPKT_BUF | \
FTGMAC100_INT_NO_RXBUF)
static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
{
iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
}
static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
unsigned int size)
{
size = FTGMAC100_RBSR_SIZE(size);
iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
}
static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
dma_addr_t addr)
{
iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
}
static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
{
iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
}
static int ftgmac100_reset_hw(struct ftgmac100 *priv)
{
struct net_device *netdev = priv->netdev;
int i;
/* NOTE: reset clears all registers */
iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
for (i = 0; i < 5; i++) {
unsigned int maccr;
maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
if (!(maccr & FTGMAC100_MACCR_SW_RST))
return 0;
udelay(1000);
}
netdev_err(netdev, "software reset failed\n");
return -EIO;
}
static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
{
unsigned int maddr = mac[0] << 8 | mac[1];
unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
/* setup ring buffer base registers */
ftgmac100_set_rx_ring_base(priv,
priv->descs_dma_addr +
offsetof(struct ftgmac100_descs, rxdes));
ftgmac100_set_normal_prio_tx_ring_base(priv,
priv->descs_dma_addr +
offsetof(struct ftgmac100_descs, txdes));
ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
ftgmac100_set_mac(priv, priv->netdev->dev_addr);
}
#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \
FTGMAC100_MACCR_RXDMA_EN | \
FTGMAC100_MACCR_TXMAC_EN | \
FTGMAC100_MACCR_RXMAC_EN | \
FTGMAC100_MACCR_FULLDUP | \
FTGMAC100_MACCR_CRC_APD | \
FTGMAC100_MACCR_RX_RUNT | \
FTGMAC100_MACCR_RX_BROADPKT)
static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
{
int maccr = MACCR_ENABLE_ALL;
switch (speed) {
default:
case 10:
break;
case 100:
maccr |= FTGMAC100_MACCR_FAST_MODE;
break;
case 1000:
maccr |= FTGMAC100_MACCR_GIGA_MODE;
break;
}
iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
}
static void ftgmac100_stop_hw(struct ftgmac100 *priv)
{
iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
}
/******************************************************************************
* internal functions (receive descriptor)
*****************************************************************************/
static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
}
static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
}
static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
}
static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
{
/* clear status bits */
rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
}
static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
}
static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
}
static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
}
static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
}
static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
}
static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
{
return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
}
static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
}
static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
{
rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
}
static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
dma_addr_t addr)
{
rxdes->rxdes3 = cpu_to_le32(addr);
}
static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
{
return le32_to_cpu(rxdes->rxdes3);
}
static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
{
return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
}
static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
{
return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
}
static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
}
static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
}
static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
{
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
}
/*
* rxdes2 is not used by hardware. We use it to keep track of page.
* Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
*/
static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
{
rxdes->rxdes2 = (unsigned int)page;
}
static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
{
return (struct page *)rxdes->rxdes2;
}
/******************************************************************************
* internal functions (receive)
*****************************************************************************/
static int ftgmac100_next_rx_pointer(int pointer)
{
return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
}
static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
{
priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
}
static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
{
return &priv->descs->rxdes[priv->rx_pointer];
}
static struct ftgmac100_rxdes *
ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
{
struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
while (ftgmac100_rxdes_packet_ready(rxdes)) {
if (ftgmac100_rxdes_first_segment(rxdes))
return rxdes;
ftgmac100_rxdes_set_dma_own(rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
}
return NULL;
}
static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
struct ftgmac100_rxdes *rxdes)
{
struct net_device *netdev = priv->netdev;
bool error = false;
if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx err\n");
netdev->stats.rx_errors++;
error = true;
}
if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx crc err\n");
netdev->stats.rx_crc_errors++;
error = true;
} else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx IP checksum err\n");
error = true;
}
if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx frame too long\n");
netdev->stats.rx_length_errors++;
error = true;
} else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx runt\n");
netdev->stats.rx_length_errors++;
error = true;
} else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx odd nibble\n");
netdev->stats.rx_length_errors++;
error = true;
}
return error;
}
static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
{
struct net_device *netdev = priv->netdev;
struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
bool done = false;
if (net_ratelimit())
netdev_dbg(netdev, "drop packet %p\n", rxdes);
do {
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
ftgmac100_rxdes_set_dma_own(rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
netdev->stats.rx_dropped++;
}
static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
{
struct net_device *netdev = priv->netdev;
struct ftgmac100_rxdes *rxdes;
struct sk_buff *skb;
bool done = false;
rxdes = ftgmac100_rx_locate_first_segment(priv);
if (!rxdes)
return false;
if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
ftgmac100_rx_drop_packet(priv);
return true;
}
/* start processing */
skb = netdev_alloc_skb_ip_align(netdev, 128);
if (unlikely(!skb)) {
if (net_ratelimit())
netdev_err(netdev, "rx skb alloc failed\n");
ftgmac100_rx_drop_packet(priv);
return true;
}
if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
netdev->stats.multicast++;
/*
* It seems that HW does checksum incorrectly with fragmented packets,
* so we are conservative here - if HW checksum error, let software do
* the checksum again.
*/
if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
(ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
do {
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
struct page *page = ftgmac100_rxdes_get_page(rxdes);
unsigned int size;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
size = ftgmac100_rxdes_data_length(rxdes);
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
skb->len += size;
skb->data_len += size;
skb->truesize += PAGE_SIZE;
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done);
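/* pull up to the first 64 bytes out of the page fragments into the skb
 * linear area; a packet that small no longer needs the page counted in
 * its truesize */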
if (skb->len <= 64)
skb->truesize -= PAGE_SIZE;
__pskb_pull_tail(skb, min(skb->len, 64U));
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
/* push packet to protocol stack */
napi_gro_receive(&priv->napi, skb);
(*processed)++;
return true;
}
/******************************************************************************
* internal functions (transmit descriptor)
*****************************************************************************/
static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
{
/* clear all except end of ring bit */
txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
txdes->txdes1 = 0;
txdes->txdes2 = 0;
txdes->txdes3 = 0;
}
static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
{
return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
{
/*
* Make sure dma own bit will not be set before any other
* descriptor fields.
*/
wmb();
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
{
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
}
static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
{
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
}
static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
{
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
}
static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
unsigned int len)
{
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
}
static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
{
txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
}
static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
{
txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
}
static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
{
txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
}
static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
{
txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
}
static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
dma_addr_t addr)
{
txdes->txdes3 = cpu_to_le32(addr);
}
static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
{
return le32_to_cpu(txdes->txdes3);
}
/*
* txdes2 is not used by hardware. We use it to keep track of socket buffer.
* Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu().
*/
static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
struct sk_buff *skb)
{
txdes->txdes2 = (unsigned int)skb;
}
static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
{
return (struct sk_buff *)txdes->txdes2;
}
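/*
* Note: txdes2 is a 32-bit field, so storing the skb pointer in it directly
* appears to assume a 32-bit platform, which is the case for the SoCs this
* driver targets.
*/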
/******************************************************************************
* internal functions (transmit)
*****************************************************************************/
static int ftgmac100_next_tx_pointer(int pointer)
{
return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
}
static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
{
priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
}
static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
{
priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
}
static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
{
return &priv->descs->txdes[priv->tx_pointer];
}
static struct ftgmac100_txdes *
ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
{
return &priv->descs->txdes[priv->tx_clean_pointer];
}
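/*
* Reclaim one transmitted descriptor: unmap the buffer, free the skb,
* update the TX statistics and wake the queue. Returns false when the
* next descriptor is still owned by the DMA engine or nothing is pending.
*/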
static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
{
struct net_device *netdev = priv->netdev;
struct ftgmac100_txdes *txdes;
struct sk_buff *skb;
dma_addr_t map;
if (priv->tx_pending == 0)
return false;
txdes = ftgmac100_current_clean_txdes(priv);
if (ftgmac100_txdes_owned_by_dma(txdes))
return false;
skb = ftgmac100_txdes_get_skb(txdes);
map = ftgmac100_txdes_get_dma_addr(txdes);
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb(skb);
ftgmac100_txdes_reset(txdes);
ftgmac100_tx_clean_pointer_advance(priv);
spin_lock(&priv->tx_lock);
priv->tx_pending--;
spin_unlock(&priv->tx_lock);
netif_wake_queue(netdev);
return true;
}
static void ftgmac100_tx_complete(struct ftgmac100 *priv)
{
while (ftgmac100_tx_complete_packet(priv))
;
}
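/*
* Fill a single TX descriptor for a linear skb (the hardware is handed one
* first-and-last segment), request an interrupt on completion, set the
* IP/TCP/UDP checksum offload bits for IPv4 packets when requested, and
* kick the normal-priority TX DMA poll register.
*/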
static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
dma_addr_t map)
{
struct net_device *netdev = priv->netdev;
struct ftgmac100_txdes *txdes;
unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
txdes = ftgmac100_current_txdes(priv);
ftgmac100_tx_pointer_advance(priv);
/* setup TX descriptor */
ftgmac100_txdes_set_skb(txdes, skb);
ftgmac100_txdes_set_dma_addr(txdes, map);
ftgmac100_txdes_set_buffer_size(txdes, len);
ftgmac100_txdes_set_first_segment(txdes);
ftgmac100_txdes_set_last_segment(txdes);
ftgmac100_txdes_set_txint(txdes);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
__be16 protocol = skb->protocol;
if (protocol == cpu_to_be16(ETH_P_IP)) {
u8 ip_proto = ip_hdr(skb)->protocol;
ftgmac100_txdes_set_ipcs(txdes);
if (ip_proto == IPPROTO_TCP)
ftgmac100_txdes_set_tcpcs(txdes);
else if (ip_proto == IPPROTO_UDP)
ftgmac100_txdes_set_udpcs(txdes);
}
}
spin_lock(&priv->tx_lock);
priv->tx_pending++;
if (priv->tx_pending == TX_QUEUE_ENTRIES)
netif_stop_queue(netdev);
/* start transmit */
ftgmac100_txdes_set_dma_own(txdes);
spin_unlock(&priv->tx_lock);
ftgmac100_txdma_normal_prio_start_polling(priv);
return NETDEV_TX_OK;
}
/******************************************************************************
* internal functions (buffer)
*****************************************************************************/
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
struct ftgmac100_rxdes *rxdes, gfp_t gfp)
{
struct net_device *netdev = priv->netdev;
struct page *page;
dma_addr_t map;
page = alloc_page(gfp);
if (!page) {
if (net_ratelimit())
netdev_err(netdev, "failed to allocate rx page\n");
return -ENOMEM;
}
map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, map))) {
if (net_ratelimit())
netdev_err(netdev, "failed to map rx page\n");
__free_page(page);
return -ENOMEM;
}
ftgmac100_rxdes_set_page(rxdes, page);
ftgmac100_rxdes_set_dma_addr(rxdes, map);
ftgmac100_rxdes_set_dma_own(rxdes);
return 0;
}
static void ftgmac100_free_buffers(struct ftgmac100 *priv)
{
int i;
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
struct page *page = ftgmac100_rxdes_get_page(rxdes);
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
if (!page)
continue;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
__free_page(page);
}
for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
if (!skb)
continue;
dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
priv->descs, priv->descs_dma_addr);
}
static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
{
int i;
priv->descs = dma_alloc_coherent(priv->dev,
sizeof(struct ftgmac100_descs),
&priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
/* initialize RX ring */
ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
goto err;
}
/* initialize TX ring */
ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
err:
ftgmac100_free_buffers(priv);
return -ENOMEM;
}
/******************************************************************************
* internal functions (mdio)
*****************************************************************************/
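/*
* phylib link-change callback: when the negotiated speed changes, mask
* interrupts, quiesce the MAC and restart it at the new speed.
*/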
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct phy_device *phydev = priv->phydev;
int ier;
if (phydev->speed == priv->old_speed)
return;
priv->old_speed = phydev->speed;
ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
/* disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
ftgmac100_stop_hw(priv);
netif_start_queue(netdev);
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, phydev->speed);
/* re-enable interrupts */
iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
}
static int ftgmac100_mii_probe(struct ftgmac100 *priv)
{
struct net_device *netdev = priv->netdev;
struct phy_device *phydev = NULL;
int i;
/* search for a connected PHY device */
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *tmp = priv->mii_bus->phy_map[i];
if (tmp) {
phydev = tmp;
break;
}
}
/* by now we should have found a proper phydev to attach to */
if (!phydev) {
netdev_info(netdev, "%s: no PHY found\n", netdev->name);
return -ENODEV;
}
phydev = phy_connect(netdev, dev_name(&phydev->dev),
&ftgmac100_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
return PTR_ERR(phydev);
}
priv->phydev = phydev;
return 0;
}
/******************************************************************************
* struct mii_bus functions
*****************************************************************************/
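/*
* MDIO bus accessors: program PHYCR with the PHY and register address
* (preserving the MDC cycle threshold), then poll for up to ~1 ms for the
* MIIRD/MIIWR bit to clear before reading or confirming the data.
*/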
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
struct ftgmac100 *priv = netdev_priv(netdev);
unsigned int phycr;
int i;
phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
/* preserve MDC cycle threshold */
phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
FTGMAC100_PHYCR_REGAD(regnum) |
FTGMAC100_PHYCR_MIIRD;
iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
for (i = 0; i < 10; i++) {
phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
int data;
data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
return FTGMAC100_PHYDATA_MIIRDATA(data);
}
udelay(100);
}
netdev_err(netdev, "mdio read timed out\n");
return -EIO;
}
static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
int regnum, u16 value)
{
struct net_device *netdev = bus->priv;
struct ftgmac100 *priv = netdev_priv(netdev);
unsigned int phycr;
int data;
int i;
phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
/* preserve MDC cycle threshold */
phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
FTGMAC100_PHYCR_REGAD(regnum) |
FTGMAC100_PHYCR_MIIWR;
data = FTGMAC100_PHYDATA_MIIWDATA(value);
iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
for (i = 0; i < 10; i++) {
phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
return 0;
udelay(100);
}
netdev_err(netdev, "mdio write timed out\n");
return -EIO;
}
static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
{
return 0;
}
/******************************************************************************
* struct ethtool_ops functions
*****************************************************************************/
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftgmac100_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct ftgmac100 *priv = netdev_priv(netdev);
return phy_ethtool_gset(priv->phydev, cmd);
}
static int ftgmac100_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct ftgmac100 *priv = netdev_priv(netdev);
return phy_ethtool_sset(priv->phydev, cmd);
}
static const struct ethtool_ops ftgmac100_ethtool_ops = {
.set_settings = ftgmac100_set_settings,
.get_settings = ftgmac100_get_settings,
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
};
/******************************************************************************
* interrupt handler
*****************************************************************************/
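/*
* Hard interrupt handler: mask all interrupt sources and defer the actual
* RX/TX work to the NAPI poll routine.
*/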
static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
{
struct net_device *netdev = dev_id;
struct ftgmac100 *priv = netdev_priv(netdev);
if (likely(netif_running(netdev))) {
/* Disable interrupts for polling */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
}
/******************************************************************************
* struct napi_struct functions
*****************************************************************************/
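/*
* NAPI poll: acknowledge the raised interrupt status bits, receive up to
* @budget packets, reclaim completed TX descriptors, log error conditions
* and re-enable interrupts once the RX ring has been drained.
*/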
static int ftgmac100_poll(struct napi_struct *napi, int budget)
{
struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
struct net_device *netdev = priv->netdev;
unsigned int status;
bool completed = true;
int rx = 0;
status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
/*
* FTGMAC100_INT_RPKT_BUF:
* RX DMA has received packets into RX buffer successfully
*
* FTGMAC100_INT_NO_RXBUF:
* RX buffer unavailable
*/
bool retry;
do {
retry = ftgmac100_rx_packet(priv, &rx);
} while (retry && rx < budget);
if (retry && rx == budget)
completed = false;
}
if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
/*
* FTGMAC100_INT_XPKT_ETH:
* packet transmitted to ethernet successfully
*
* FTGMAC100_INT_XPKT_LOST:
* packet transmitted to ethernet lost due to late
* collision or excessive collision
*/
ftgmac100_tx_complete(priv);
}
if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
if (net_ratelimit())
netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
if (status & FTGMAC100_INT_NO_RXBUF) {
/* RX buffer unavailable */
netdev->stats.rx_over_errors++;
}
if (status & FTGMAC100_INT_RPKT_LOST) {
/* received packet lost due to RX FIFO full */
netdev->stats.rx_fifo_errors++;
}
}
if (completed) {
napi_complete(napi);
/* enable all interrupts */
iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
}
return rx;
}
/******************************************************************************
* struct net_device_ops functions
*****************************************************************************/
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
int err;
err = ftgmac100_alloc_buffers(priv);
if (err) {
netdev_err(netdev, "failed to allocate buffers\n");
goto err_alloc;
}
err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
if (err) {
netdev_err(netdev, "failed to request irq %d\n", priv->irq);
goto err_irq;
}
priv->rx_pointer = 0;
priv->tx_clean_pointer = 0;
priv->tx_pointer = 0;
priv->tx_pending = 0;
err = ftgmac100_reset_hw(priv);
if (err)
goto err_hw;
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, 10);
phy_start(priv->phydev);
napi_enable(&priv->napi);
netif_start_queue(netdev);
/* enable all interrupts */
iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
return 0;
err_hw:
free_irq(priv->irq, netdev);
err_irq:
ftgmac100_free_buffers(priv);
err_alloc:
return err;
}
static int ftgmac100_stop(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
/* disable all interrupts */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
phy_stop(priv->phydev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
ftgmac100_free_buffers(priv);
return 0;
}
static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
dma_addr_t map;
if (unlikely(skb->len > MAX_PKT_SIZE)) {
if (net_ratelimit())
netdev_dbg(netdev, "tx packet too big\n");
netdev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, map))) {
/* drop packet */
if (net_ratelimit())
netdev_err(netdev, "map socket buffer failed\n");
netdev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
return ftgmac100_xmit(priv, skb, map);
}
/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct ftgmac100 *priv = netdev_priv(netdev);
return phy_mii_ioctl(priv->phydev, ifr, cmd);
}
static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_open = ftgmac100_open,
.ndo_stop = ftgmac100_stop,
.ndo_start_xmit = ftgmac100_hard_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ftgmac100_do_ioctl,
};
/******************************************************************************
* struct platform_driver functions
*****************************************************************************/
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
int irq;
struct net_device *netdev;
struct ftgmac100 *priv;
int err;
int i;
if (!pdev)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* setup net_device */
netdev = alloc_etherdev(sizeof(*priv));
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
netdev->netdev_ops = &ftgmac100_netdev_ops;
netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
platform_set_drvdata(pdev, netdev);
/* setup private data */
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dev = &pdev->dev;
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev));
if (!priv->res) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_req_mem;
}
priv->base = ioremap(res->start, resource_size(res));
if (!priv->base) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_ioremap;
}
priv->irq = irq;
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus) {
err = -EIO;
goto err_alloc_mdiobus;
}
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
priv->mii_bus->priv = netdev;
priv->mii_bus->read = ftgmac100_mdiobus_read;
priv->mii_bus->write = ftgmac100_mdiobus_write;
priv->mii_bus->reset = ftgmac100_mdiobus_reset;
priv->mii_bus->irq = priv->phy_irq;
for (i = 0; i < PHY_MAX_ADDR; i++)
priv->mii_bus->irq[i] = PHY_POLL;
err = mdiobus_register(priv->mii_bus);
if (err) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
goto err_register_mdiobus;
}
err = ftgmac100_mii_probe(priv);
if (err) {
dev_err(&pdev->dev, "MII Probe failed!\n");
goto err_mii_probe;
}
/* register network device */
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
goto err_register_netdev;
}
netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_hw_addr_random(netdev);
netdev_info(netdev, "generated random MAC address %pM\n",
netdev->dev_addr);
}
return 0;
err_register_netdev:
phy_disconnect(priv->phydev);
err_mii_probe:
mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
mdiobus_free(priv->mii_bus);
err_alloc_mdiobus:
iounmap(priv->base);
err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
platform_set_drvdata(pdev, NULL);
free_netdev(netdev);
err_alloc_etherdev:
return err;
}
static int __exit ftgmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftgmac100 *priv;
netdev = platform_get_drvdata(pdev);
priv = netdev_priv(netdev);
unregister_netdev(netdev);
phy_disconnect(priv->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
iounmap(priv->base);
release_resource(priv->res);
netif_napi_del(&priv->napi);
platform_set_drvdata(pdev, NULL);
free_netdev(netdev);
return 0;
}
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
.remove = __exit_p(ftgmac100_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
/******************************************************************************
* initialization / finalization
*****************************************************************************/
static int __init ftgmac100_init(void)
{
pr_info("Loading version " DRV_VERSION " ...\n");
return platform_driver_register(&ftgmac100_driver);
}
static void __exit ftgmac100_exit(void)
{
platform_driver_unregister(&ftgmac100_driver);
}
module_init(ftgmac100_init);
module_exit(ftgmac100_exit);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-ZaraCL- | drivers/staging/vme/boards/vme_vmivme7805.c | 5628 | 2963 | /*
* Support for the VMIVME-7805 board access to the Universe II bridge.
*
* Author: Arthur Benilov <arthur.benilov@iba-group.com>
* Copyright 2010 Ion Beam Application, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/io.h>
#include "vme_vmivme7805.h"
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);
/** Base address to access FPGA register */
static void *vmic_base;
static const char driver_name[] = "vmivme_7805";
static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
static struct pci_driver vmic_driver = {
.name = driver_name,
.id_table = vmic_ids,
.probe = vmic_probe,
.remove = vmic_remove,
};
static int __init vmic_init(void)
{
return pci_register_driver(&vmic_driver);
}
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval;
u32 data;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* Map registers in BAR 0 */
vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Clear the FPGA VME IF contents */
iowrite32(0, vmic_base + VME_CONTROL);
/* Clear any initial BERR */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data |= BM_VME_CONTROL_BERRST;
iowrite32(data, vmic_base + VME_CONTROL);
/* Enable the vme interface and byte swapping */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data = data | BM_VME_CONTROL_MASTER_ENDIAN |
BM_VME_CONTROL_SLAVE_ENDIAN |
BM_VME_CONTROL_ABLE |
BM_VME_CONTROL_BERRI |
BM_VME_CONTROL_BPENA |
BM_VME_CONTROL_VBENA;
iowrite32(data, vmic_base + VME_CONTROL);
return 0;
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err:
return retval;
}
static void vmic_remove(struct pci_dev *pdev)
{
iounmap(vmic_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void __exit vmic_exit(void)
{
pci_unregister_driver(&vmic_driver);
}
MODULE_DESCRIPTION("VMIVME-7805 board support driver");
MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
MODULE_LICENSE("GPL");
module_init(vmic_init);
module_exit(vmic_exit);
| gpl-2.0 |
maxwen/android_kernel_oppo_apq8064 | drivers/staging/vme/boards/vme_vmivme7805.c | 5628 | 2963 | /*
* Support for the VMIVME-7805 board access to the Universe II bridge.
*
* Author: Arthur Benilov <arthur.benilov@iba-group.com>
* Copyright 2010 Ion Beam Application, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/io.h>
#include "vme_vmivme7805.h"
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);
/** Base address to access FPGA register */
static void *vmic_base;
static const char driver_name[] = "vmivme_7805";
static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
static struct pci_driver vmic_driver = {
.name = driver_name,
.id_table = vmic_ids,
.probe = vmic_probe,
.remove = vmic_remove,
};
static int __init vmic_init(void)
{
return pci_register_driver(&vmic_driver);
}
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval;
u32 data;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* Map registers in BAR 0 */
vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Clear the FPGA VME IF contents */
iowrite32(0, vmic_base + VME_CONTROL);
/* Clear any initial BERR */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data |= BM_VME_CONTROL_BERRST;
iowrite32(data, vmic_base + VME_CONTROL);
/* Enable the vme interface and byte swapping */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data = data | BM_VME_CONTROL_MASTER_ENDIAN |
BM_VME_CONTROL_SLAVE_ENDIAN |
BM_VME_CONTROL_ABLE |
BM_VME_CONTROL_BERRI |
BM_VME_CONTROL_BPENA |
BM_VME_CONTROL_VBENA;
iowrite32(data, vmic_base + VME_CONTROL);
return 0;
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err:
return retval;
}
static void vmic_remove(struct pci_dev *pdev)
{
iounmap(vmic_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void __exit vmic_exit(void)
{
pci_unregister_driver(&vmic_driver);
}
MODULE_DESCRIPTION("VMIVME-7805 board support driver");
MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
MODULE_LICENSE("GPL");
module_init(vmic_init);
module_exit(vmic_exit);
| gpl-2.0 |
anders3408/kernel_oppo_find5-old | drivers/staging/vme/boards/vme_vmivme7805.c | 5628 | 2963 | /*
* Support for the VMIVME-7805 board access to the Universe II bridge.
*
* Author: Arthur Benilov <arthur.benilov@iba-group.com>
* Copyright 2010 Ion Beam Application, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/io.h>
#include "vme_vmivme7805.h"
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);
/** Base address to access FPGA register */
static void *vmic_base;
static const char driver_name[] = "vmivme_7805";
static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
static struct pci_driver vmic_driver = {
.name = driver_name,
.id_table = vmic_ids,
.probe = vmic_probe,
.remove = vmic_remove,
};
static int __init vmic_init(void)
{
return pci_register_driver(&vmic_driver);
}
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval;
u32 data;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* Map registers in BAR 0 */
vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Clear the FPGA VME IF contents */
iowrite32(0, vmic_base + VME_CONTROL);
/* Clear any initial BERR */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data |= BM_VME_CONTROL_BERRST;
iowrite32(data, vmic_base + VME_CONTROL);
/* Enable the vme interface and byte swapping */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data = data | BM_VME_CONTROL_MASTER_ENDIAN |
BM_VME_CONTROL_SLAVE_ENDIAN |
BM_VME_CONTROL_ABLE |
BM_VME_CONTROL_BERRI |
BM_VME_CONTROL_BPENA |
BM_VME_CONTROL_VBENA;
iowrite32(data, vmic_base + VME_CONTROL);
return 0;
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err:
return retval;
}
static void vmic_remove(struct pci_dev *pdev)
{
iounmap(vmic_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void __exit vmic_exit(void)
{
pci_unregister_driver(&vmic_driver);
}
MODULE_DESCRIPTION("VMIVME-7805 board support driver");
MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
MODULE_LICENSE("GPL");
module_init(vmic_init);
module_exit(vmic_exit);
| gpl-2.0 |
Redmi-dev/android_kernel_xiaomi_msm8226 | arch/um/drivers/mmapper_kern.c | 7676 | 2898 | /*
* arch/um/drivers/mmapper_kern.c
*
* BRIEF MODULE DESCRIPTION
*
* Copyright (C) 2000 RidgeRun, Inc.
* Author: RidgeRun, Inc.
* Greg Lonnon glonnon@ridgerun.com or info@ridgerun.com
*
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include "mem_user.h"
/* These are set in mmapper_init, which is called at boot time */
static unsigned long mmapper_size;
static unsigned long p_buf;
static char *v_buf;
static ssize_t mmapper_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, v_buf, mmapper_size);
}
static ssize_t mmapper_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (*ppos > mmapper_size)
return -EINVAL;
return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
}
static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
static int mmapper_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret = -EINVAL;
int size;
if (vma->vm_pgoff != 0)
goto out;
size = vma->vm_end - vma->vm_start;
if (size > mmapper_size)
return -EFAULT;
/*
* XXX A comment above remap_pfn_range says it should only be
* called when the mm semaphore is held
*/
if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,
vma->vm_page_prot))
goto out;
ret = 0;
out:
return ret;
}
static int mmapper_open(struct inode *inode, struct file *file)
{
return 0;
}
static int mmapper_release(struct inode *inode, struct file *file)
{
return 0;
}
static const struct file_operations mmapper_fops = {
.owner = THIS_MODULE,
.read = mmapper_read,
.write = mmapper_write,
.unlocked_ioctl = mmapper_ioctl,
.mmap = mmapper_mmap,
.open = mmapper_open,
.release = mmapper_release,
.llseek = default_llseek,
};
/*
* No locking needed - only used (and modified) by below initcall and exitcall.
*/
static struct miscdevice mmapper_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mmapper",
.fops = &mmapper_fops
};
static int __init mmapper_init(void)
{
int err;
printk(KERN_INFO "Mapper v0.1\n");
v_buf = (char *) find_iomem("mmapper", &mmapper_size);
if (mmapper_size == 0) {
printk(KERN_ERR "mmapper_init - find_iomem failed\n");
return -ENODEV;
}
p_buf = __pa(v_buf);
err = misc_register(&mmapper_dev);
if (err) {
printk(KERN_ERR "mmapper - misc_register failed, err = %d\n",
err);
return err;
}
return 0;
}
static void mmapper_exit(void)
{
misc_deregister(&mmapper_dev);
}
module_init(mmapper_init);
module_exit(mmapper_exit);
MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>");
MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mustermaxmueller/android_kernel_sony_msm8974_togari_5.x | fs/ntfs/attrib.c | 7676 | 91838 | /**
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
* Copyright (c) 2002 Richard Russon
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include "attrib.h"
#include "debug.h"
#include "layout.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#include "types.h"
/**
* ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
* @ctx: active attribute search context if present or NULL if not
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_map_runlist_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Note the runlist can be NULL after this function returns if @vcn is zero and
* the attribute has zero allocated size, i.e. there simply is no runlist.
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist will be modified.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
{
VCN end_vcn;
unsigned long flags;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
runlist_element *rl;
struct page *put_this_page = NULL;
int err = 0;
bool ctx_is_temporary, ctx_needs_reset;
ntfs_attr_search_ctx old_ctx = { NULL, };
ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
(unsigned long long)vcn);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
if (!ctx) {
ctx_is_temporary = ctx_needs_reset = true;
m = map_mft_record(base_ni);
if (IS_ERR(m))
return PTR_ERR(m);
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
} else {
VCN allocated_size_vcn;
BUG_ON(IS_ERR(ctx->mrec));
a = ctx->attr;
BUG_ON(!a->non_resident);
ctx_is_temporary = false;
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
read_lock_irqsave(&ni->size_lock, flags);
allocated_size_vcn = ni->allocated_size >>
ni->vol->cluster_size_bits;
read_unlock_irqrestore(&ni->size_lock, flags);
if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
end_vcn = allocated_size_vcn - 1;
/*
* If we already have the attribute extent containing @vcn in
* @ctx, no need to look it up again. We slightly cheat in
* that if vcn exceeds the allocated size, we will refuse to
* map the runlist below, so there is definitely no need to get
* the right attribute extent.
*/
if (vcn >= allocated_size_vcn || (a->type == ni->type &&
a->name_length == ni->name_len &&
!memcmp((u8*)a + le16_to_cpu(a->name_offset),
ni->name, ni->name_len) &&
sle64_to_cpu(a->data.non_resident.lowest_vcn)
<= vcn && end_vcn >= vcn))
ctx_needs_reset = false;
else {
/* Save the old search context. */
old_ctx = *ctx;
/*
* If the currently mapped (extent) inode is not the
* base inode we will unmap it when we reinitialize the
* search context which means we need to get a
* reference to the page containing the mapped mft
* record so we do not accidentally drop changes to the
* mft record when it has not been marked dirty yet.
*/
if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
put_this_page = old_ctx.ntfs_ino->page;
page_cache_get(put_this_page);
}
/*
* Reinitialize the search context so we can lookup the
* needed attribute extent.
*/
ntfs_attr_reinit_search_ctx(ctx);
ctx_needs_reset = true;
}
}
if (ctx_needs_reset) {
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
BUG_ON(!ctx->attr->non_resident);
}
a = ctx->attr;
/*
* Only decompress the mapping pairs if @vcn is inside it. Otherwise
* we get into problems when we try to map an out of bounds vcn because
* we then try to map the already mapped runlist fragment and
* ntfs_mapping_pairs_decompress() fails.
*/
end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
if (unlikely(vcn && vcn >= end_vcn)) {
err = -ENOENT;
goto err_out;
}
rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
if (IS_ERR(rl))
err = PTR_ERR(rl);
else
ni->runlist.rl = rl;
err_out:
if (ctx_is_temporary) {
if (likely(ctx))
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
} else if (ctx_needs_reset) {
/*
* If there is no attribute list, restoring the search context
* is accomplished simply by copying the saved context back over
* the caller supplied context. If there is an attribute list,
* things are more complicated as we need to deal with mapping
* of mft records and resulting potential changes in pointers.
*/
if (NInoAttrList(base_ni)) {
/*
* If the currently mapped (extent) inode is not the
* one we had before, we need to unmap it and map the
* old one.
*/
if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
/*
* If the currently mapped inode is not the
* base inode, unmap it.
*/
if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
ctx->base_ntfs_ino) {
unmap_extent_mft_record(ctx->ntfs_ino);
ctx->mrec = ctx->base_mrec;
BUG_ON(!ctx->mrec);
}
/*
* If the old mapped inode is not the base
* inode, map it.
*/
if (old_ctx.base_ntfs_ino &&
old_ctx.ntfs_ino !=
old_ctx.base_ntfs_ino) {
retry_map:
ctx->mrec = map_mft_record(
old_ctx.ntfs_ino);
/*
* Something bad has happened. If out
* of memory retry till it succeeds.
* Any other errors are fatal and we
* return the error code in ctx->mrec.
* Let the caller deal with it... We
* just need to fudge things so the
* caller can reinit and/or put the
* search context safely.
*/
if (IS_ERR(ctx->mrec)) {
if (PTR_ERR(ctx->mrec) ==
-ENOMEM) {
schedule();
goto retry_map;
} else
old_ctx.ntfs_ino =
old_ctx.
base_ntfs_ino;
}
}
}
/* Update the changed pointers in the saved context. */
if (ctx->mrec != old_ctx.mrec) {
if (!IS_ERR(ctx->mrec))
old_ctx.attr = (ATTR_RECORD*)(
(u8*)ctx->mrec +
((u8*)old_ctx.attr -
(u8*)old_ctx.mrec));
old_ctx.mrec = ctx->mrec;
}
}
/* Restore the search context to the saved one. */
*ctx = old_ctx;
/*
* We drop the reference on the page we took earlier. In the
* case that IS_ERR(ctx->mrec) is true this means we might lose
* some changes to the mft record that had been made between
* the last time it was marked dirty/written out and now. This
* at this stage is not a problem as the mapping error is fatal
* enough that the mft record cannot be written out anyway and
* the caller is very likely to shutdown the whole inode
* immediately and mark the volume dirty for chkdsk to pick up
* the pieces anyway.
*/
if (put_this_page)
page_cache_release(put_this_page);
}
return err;
}
/**
* ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
* @ni: ntfs inode for which to map (part of) a runlist
* @vcn: map runlist part containing this vcn
*
* Map the part of a runlist containing the @vcn of the ntfs inode @ni.
*
* Return 0 on success and -errno on error. There is one special error code
* which is not an error as such. This is -ENOENT. It means that @vcn is out
* of bounds of the runlist.
*
* Locking: - The runlist must be unlocked on entry and is unlocked on return.
* - This function takes the runlist lock for writing and may modify
* the runlist.
*/
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
int err = 0;
down_write(&ni->runlist.lock);
/* Make sure someone else didn't do the work while we were sleeping. */
if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
LCN_RL_NOT_MAPPED))
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
up_write(&ni->runlist.lock);
return err;
}
/**
* ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
* @ni: ntfs inode of the attribute whose runlist to search
* @vcn: vcn to convert
* @write_locked: true if the runlist is locked for writing
*
* Find the virtual cluster number @vcn in the runlist of the ntfs attribute
* described by the ntfs inode @ni and return the corresponding logical cluster
* number (lcn).
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @write_locked is true the caller has locked the runlist for writing and
* if false for reading.
*
* Since lcns must be >= 0, we use negative return codes with special meaning:
*
* Return code	Meaning / Description
* ===============================================================
* LCN_HOLE	Hole / not allocated on disk.
* LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
* LCN_ENOMEM	Not enough memory to map runlist.
* LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
*
* Locking: - The runlist must be locked on entry and is left locked on return.
* - If @write_locked is 'false', i.e. the runlist is locked for reading,
* the lock may be dropped inside the function so you cannot rely on
* the runlist still being the same when this function returns.
*/
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
const bool write_locked)
{
LCN lcn;
unsigned long flags;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
ni->mft_no, (unsigned long long)vcn,
write_locked ? "write" : "read");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return LCN_ENOENT;
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
/* Convert vcn to lcn. If that fails map the runlist and retry once. */
lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
if (likely(lcn >= LCN_HOLE)) {
ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
return lcn;
}
if (lcn != LCN_RL_NOT_MAPPED) {
if (lcn != LCN_ENOENT)
lcn = LCN_EIO;
} else if (!is_retry) {
int err;
if (!write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
LCN_RL_NOT_MAPPED)) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
goto retry_remap;
}
}
err = ntfs_map_runlist_nolock(ni, vcn, NULL);
if (!write_locked) {
up_write(&ni->runlist.lock);
down_read(&ni->runlist.lock);
}
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
if (err == -ENOENT)
lcn = LCN_ENOENT;
else if (err == -ENOMEM)
lcn = LCN_ENOMEM;
else
lcn = LCN_EIO;
}
if (lcn != LCN_ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %lli.",
(long long)lcn);
return lcn;
}
/**
* ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
* @ni: ntfs inode describing the runlist to search
* @vcn: vcn to find
* @ctx: active attribute search context if present or NULL if not
*
* Find the virtual cluster number @vcn in the runlist described by the ntfs
* inode @ni and return the address of the runlist element containing the @vcn.
*
* If the @vcn is not mapped yet, the attempt is made to map the attribute
* extent containing the @vcn and the vcn to lcn conversion is retried.
*
* If @ctx is specified, it is an active search context of @ni and its base mft
* record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
* runlist fragments and allows their mapping. If you do not have the mft
* record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
* will perform the necessary mapping and unmapping.
*
* Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
* restores it before returning. Thus, @ctx will be left pointing to the same
* attribute on return as on entry. However, the actual pointers in @ctx may
* point to different memory locations on return, so you must remember to reset
* any cached pointers from the @ctx, i.e. after the call to
* ntfs_attr_find_vcn_nolock(), you will probably want to do:
* m = ctx->mrec;
* a = ctx->attr;
* Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
* you cache ctx->mrec in a variable @m of type MFT_RECORD *.
* Note you need to distinguish between the lcn of the returned runlist element
* being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
* read and allocate clusters on write.
*
* Return the runlist element containing the @vcn on success and
* ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
* to decide if the return is success or failure and PTR_ERR() to get to the
* error code if IS_ERR() is true.
*
* The possible error return codes are:
* -ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
* -ENOMEM - Not enough memory to map runlist.
* -EIO - Critical error (runlist/file is corrupt, i/o error, etc).
*
* WARNING: If @ctx is supplied, regardless of whether success or failure is
* returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
* is no longer valid, i.e. you need to either call
* ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
* In that case PTR_ERR(@ctx->mrec) will give you the error code for
* why the mapping of the old inode failed.
*
* Locking: - The runlist described by @ni must be locked for writing on entry
* and is locked on return. Note the runlist may be modified when
* needed runlist fragments need to be mapped.
* - If @ctx is NULL, the base mft record of @ni must not be mapped on
* entry and it will be left unmapped on return.
* - If @ctx is not NULL, the base mft record must be mapped on entry
* and it will be left mapped on return.
*/
runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
ntfs_attr_search_ctx *ctx)
{
unsigned long flags;
runlist_element *rl;
int err = 0;
bool is_retry = false;
BUG_ON(!ni);
ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
BUG_ON(!NInoNonResident(ni));
BUG_ON(vcn < 0);
if (!ni->runlist.rl) {
read_lock_irqsave(&ni->size_lock, flags);
if (!ni->allocated_size) {
read_unlock_irqrestore(&ni->size_lock, flags);
return ERR_PTR(-ENOENT);
}
read_unlock_irqrestore(&ni->size_lock, flags);
}
retry_remap:
rl = ni->runlist.rl;
if (likely(rl && vcn >= rl[0].vcn)) {
while (likely(rl->length)) {
if (unlikely(vcn < rl[1].vcn)) {
if (likely(rl->lcn >= LCN_HOLE)) {
ntfs_debug("Done.");
return rl;
}
break;
}
rl++;
}
if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
if (likely(rl->lcn == LCN_ENOENT))
err = -ENOENT;
else
err = -EIO;
}
}
if (!err && !is_retry) {
/*
* If the search context is invalid we cannot map the unmapped
* region.
*/
if (IS_ERR(ctx->mrec))
err = PTR_ERR(ctx->mrec);
else {
/*
* The @vcn is in an unmapped region, map the runlist
* and retry.
*/
err = ntfs_map_runlist_nolock(ni, vcn, ctx);
if (likely(!err)) {
is_retry = true;
goto retry_remap;
}
}
if (err == -EINVAL)
err = -EIO;
} else if (!err)
err = -EIO;
if (err != -ENOENT)
ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
return ERR_PTR(err);
}
/**
* ntfs_attr_find - find (next) attribute in mft record
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* ntfs_attr_find() takes a search context @ctx as parameter and searches the
* mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
* attribute of @type, optionally @name and @val.
*
* If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
* point to the found attribute.
*
* If the attribute is not found, ntfs_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute before which the attribute being
* searched for would need to be inserted if such an action were to be desired.
*
* On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
* undefined and in particular do not rely on it not changing.
*
* If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
* is 'false', the search begins after @ctx->attr.
*
* If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
* @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
* @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
* the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
* sensitive. When @name is present, @name_len is the @name length in Unicode
* characters.
*
* If @name is not present (NULL), we assume that the unnamed attribute is
* being searched for.
*
* Finally, the resident attribute value @val is looked for, if present. If
* @val is not present (NULL), @val_len is ignored.
*
* ntfs_attr_find() only searches the specified mft record and it ignores the
* presence of an attribute list attribute (unless it is the one being searched
* for, obviously). If you need to take attribute lists into consideration,
* use ntfs_attr_lookup() instead (see below). This also means that you cannot
* use ntfs_attr_find() to search for extent records of non-resident
* attributes, as extents with lowest_vcn != 0 are usually described by the
* attribute list attribute only. - Note that it is possible that the first
* extent is only in the attribute list while the last extent is in the base
* mft record, so do not rely on being able to find the first extent in the
* base mft record.
*
* Warning: Never use @val when looking for attribute types which can be
* non-resident as this most likely will result in a crash!
*/
static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ATTR_RECORD *a;
ntfs_volume *vol = ctx->ntfs_ino->vol;
ntfschar *upcase = vol->upcase;
u32 upcase_len = vol->upcase_len;
/*
* Iterate over attributes in mft record starting at @ctx->attr, or the
* attribute following that, if @ctx->is_first is 'true'.
*/
if (ctx->is_first) {
a = ctx->attr;
ctx->is_first = false;
} else
a = (ATTR_RECORD*)((u8*)ctx->attr +
le32_to_cpu(ctx->attr->length));
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated))
break;
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
a->type == AT_END))
return -ENOENT;
if (unlikely(!a->length))
break;
if (a->type != type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
if (!name) {
/* The search failed if the found attribute is named. */
if (a->name_length)
return -ENOENT;
} else if (!ntfs_are_names_equal(name, name_len,
(ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
a->name_length, ic, upcase, upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, IGNORE_CASE,
upcase, upcase_len);
/*
* If @name collates before a->name, there is no
* matching attribute.
*/
if (rc == -1)
return -ENOENT;
/* If the strings are not equal, continue search. */
if (rc)
continue;
rc = ntfs_collate_names(name, name_len,
(ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)),
a->name_length, 1, CASE_SENSITIVE,
upcase, upcase_len);
if (rc == -1)
return -ENOENT;
if (rc)
continue;
}
/*
* The names match or @name not present and attribute is
* unnamed. If no @val specified, we have found the attribute
* and are done.
*/
if (!val)
return 0;
/* @val is present; compare values. */
else {
register int rc;
rc = memcmp(val, (u8*)a + le16_to_cpu(
a->data.resident.value_offset),
min_t(u32, val_len, le32_to_cpu(
a->data.resident.value_length)));
/*
* If @val collates before the current attribute's
* value, there is no matching attribute.
*/
if (!rc) {
register u32 avl;
avl = le32_to_cpu(
a->data.resident.value_length);
if (val_len == avl)
return 0;
if (val_len < avl)
return -ENOENT;
} else if (rc < 0)
return -ENOENT;
}
}
ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
NVolSetErrors(vol);
return -EIO;
}
/**
* load_attribute_list - load an attribute list into memory
* @vol: ntfs volume from which to read
* @runlist: runlist of the attribute list
* @al_start: destination buffer
* @size: size of the destination buffer in bytes
* @initialized_size: initialized size of the attribute list
*
* Walk the runlist @runlist and load all clusters from it copying them into
* the linear buffer @al. The maximum number of bytes copied to @al is @size
* bytes. Note, @size does not need to be a multiple of the cluster size. If
* @initialized_size is less than @size, the region in @al between
* @initialized_size and @size will be zeroed and not read from disk.
*
* Return 0 on success or -errno on error.
*/
int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
const s64 size, const s64 initialized_size)
{
LCN lcn;
u8 *al = al_start;
u8 *al_end = al + initialized_size;
runlist_element *rl;
struct buffer_head *bh;
struct super_block *sb;
unsigned long block_size;
unsigned long block, max_block;
int err = 0;
unsigned char block_size_bits;
ntfs_debug("Entering.");
if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
initialized_size > size)
return -EINVAL;
if (!initialized_size) {
memset(al, 0, size);
return 0;
}
sb = vol->sb;
block_size = sb->s_blocksize;
block_size_bits = sb->s_blocksize_bits;
down_read(&runlist->lock);
rl = runlist->rl;
if (!rl) {
ntfs_error(sb, "Cannot read attribute list since runlist is "
"missing.");
goto err_out;
}
/* Read all clusters specified by the runlist one run at a time. */
while (rl->length) {
lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
(unsigned long long)rl->vcn,
(unsigned long long)lcn);
/* The attribute list cannot be sparse. */
if (lcn < 0) {
ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
"read attribute list.");
goto err_out;
}
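/*
 * Convert the cluster number to a device block number; e.g., with
 * hypothetical 4kiB clusters (cluster_size_bits == 12) and 512-byte
 * blocks (block_size_bits == 9), lcn 2 would map to block 16.
 */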
block = lcn << vol->cluster_size_bits >> block_size_bits;
/* Read the run from device in chunks of block_size bytes. */
max_block = block + (rl->length << vol->cluster_size_bits >>
block_size_bits);
ntfs_debug("max_block = 0x%lx.", max_block);
do {
ntfs_debug("Reading block = 0x%lx.", block);
bh = sb_bread(sb, block);
if (!bh) {
ntfs_error(sb, "sb_bread() failed. Cannot "
"read attribute list.");
goto err_out;
}
if (al + block_size >= al_end)
goto do_final;
memcpy(al, bh->b_data, block_size);
brelse(bh);
al += block_size;
} while (++block < max_block);
rl++;
}
if (initialized_size < size) {
initialize:
memset(al_start + initialized_size, 0, size - initialized_size);
}
done:
up_read(&runlist->lock);
return err;
do_final:
if (al < al_end) {
/*
* Partial block.
*
* Note: The attribute list can be smaller than its allocation
* by multiple clusters. This has been encountered by at least
* two people running Windows XP, thus we cannot do any
* truncation sanity checking here. (AIA)
*/
memcpy(al, bh->b_data, al_end - al);
brelse(bh);
if (initialized_size < size)
goto initialize;
goto done;
}
brelse(bh);
/* Real overflow! */
ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
"is truncated.");
err_out:
err = -EIO;
goto done;
}
/**
* ntfs_external_attr_find - find an attribute in the attribute list of an inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* You should not need to call this function directly. Use ntfs_attr_lookup()
* instead.
*
* Find an attribute by searching the attribute list for the corresponding
* attribute list entry. Having found the entry, map the mft record if the
* attribute is in a different mft record/inode, ntfs_attr_find() the attribute
* in there and return it.
*
* On first search @ctx->ntfs_ino must be the base mft record and @ctx must
* have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
* calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
* then the base inode).
*
* After finishing with the attribute/mft record you need to call
* ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
* mapped inodes, etc).
*
* If the attribute is found, ntfs_external_attr_find() returns 0 and
* @ctx->attr will point to the found attribute. @ctx->mrec will point to the
* mft record in which @ctx->attr is located and @ctx->al_entry will point to
* the attribute list entry for the attribute.
*
* If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
* @ctx->attr will point to the attribute in the base mft record before which
* the attribute being searched for would need to be inserted if such an action
* were to be desired. @ctx->mrec will point to the mft record in which
* @ctx->attr is located and @ctx->al_entry will point to the attribute list
* entry of the attribute before which the attribute being searched for would
* need to be inserted if such an action were to be desired.
*
* Thus to insert the not found attribute, one wants to add the attribute to
* @ctx->mrec (the base mft record) and if there is not enough space, the
* attribute should be placed in a newly allocated extent mft record. The
* attribute list entry for the inserted attribute should be inserted in the
* attribute list attribute at @ctx->al_entry.
*
* On actual error, ntfs_external_attr_find() returns -EIO. In this case
* @ctx->attr is undefined and in particular do not rely on it not changing.
*/
static int ntfs_external_attr_find(const ATTR_TYPE type,
const ntfschar *name, const u32 name_len,
const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni, *ni;
ntfs_volume *vol;
ATTR_LIST_ENTRY *al_entry, *next_al_entry;
u8 *al_start, *al_end;
ATTR_RECORD *a;
ntfschar *al_name;
u32 al_name_len;
int err = 0;
static const char *es = " Unmount and run chkdsk.";
ni = ctx->ntfs_ino;
base_ni = ctx->base_ntfs_ino;
ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
if (!base_ni) {
/* First call happens with the base mft record. */
base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
ctx->base_mrec = ctx->mrec;
}
if (ni == base_ni)
ctx->base_attr = ctx->attr;
if (type == AT_END)
goto not_found;
vol = base_ni->vol;
al_start = base_ni->attr_list;
al_end = al_start + base_ni->attr_list_size;
if (!ctx->al_entry)
ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
/*
* Iterate over the entries in the attribute list, starting at
* @ctx->al_entry if @ctx->is_first is 'true', or at the entry following
* it otherwise.
*/
if (ctx->is_first) {
al_entry = ctx->al_entry;
ctx->is_first = false;
} else
al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
le16_to_cpu(ctx->al_entry->length));
for (;; al_entry = next_al_entry) {
/* Out of bounds check. */
if ((u8*)al_entry < base_ni->attr_list ||
(u8*)al_entry > al_end)
break; /* Inode is corrupt. */
ctx->al_entry = al_entry;
/* Catch the end of the attribute list. */
if ((u8*)al_entry == al_end)
goto not_found;
if (!al_entry->length)
break;
if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
le16_to_cpu(al_entry->length) > al_end)
break;
next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
le16_to_cpu(al_entry->length));
if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
goto not_found;
if (type != al_entry->type)
continue;
/*
* If @name is present, compare the two names. If @name is
* missing, assume we want an unnamed attribute.
*/
al_name_len = al_entry->name_length;
al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
if (!name) {
if (al_name_len)
goto not_found;
} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
name_len, ic, vol->upcase, vol->upcase_len)) {
register int rc;
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, IGNORE_CASE,
vol->upcase, vol->upcase_len);
/*
* If @name collates before al_name, there is no
* matching attribute.
*/
if (rc == -1)
goto not_found;
/* If the strings are not equal, continue search. */
if (rc)
continue;
/*
* FIXME: Reverse engineering showed 0, IGNORE_CASE but
* that is inconsistent with ntfs_attr_find(). The
* subsequent rc checks were also different. Perhaps I
* made a mistake in one of the two. Need to recheck
* which is correct or at least see what is going on...
* (AIA)
*/
rc = ntfs_collate_names(name, name_len, al_name,
al_name_len, 1, CASE_SENSITIVE,
vol->upcase, vol->upcase_len);
if (rc == -1)
goto not_found;
if (rc)
continue;
}
/*
* The names match or @name not present and attribute is
* unnamed. Now check @lowest_vcn. Continue search if the
* next attribute list entry still fits @lowest_vcn. Otherwise
* we have reached the right one or the search has failed.
*/
if (lowest_vcn && (u8*)next_al_entry >= al_start &&
(u8*)next_al_entry + 6 < al_end &&
(u8*)next_al_entry + le16_to_cpu(
next_al_entry->length) <= al_end &&
sle64_to_cpu(next_al_entry->lowest_vcn) <=
lowest_vcn &&
next_al_entry->type == al_entry->type &&
next_al_entry->name_length == al_name_len &&
ntfs_are_names_equal((ntfschar*)((u8*)
next_al_entry +
next_al_entry->name_offset),
next_al_entry->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
continue;
if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
ntfs_error(vol->sb, "Found stale mft "
"reference in attribute list "
"of base inode 0x%lx.%s",
base_ni->mft_no, es);
err = -EIO;
break;
}
} else { /* Mft references do not match. */
/* If there is a mapped record unmap it first. */
if (ni != base_ni)
unmap_extent_mft_record(ni);
/* Do we want the base record back? */
if (MREF_LE(al_entry->mft_reference) ==
base_ni->mft_no) {
ni = ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
} else {
/* We want an extent record. */
ctx->mrec = map_extent_mft_record(base_ni,
le64_to_cpu(
al_entry->mft_reference), &ni);
if (IS_ERR(ctx->mrec)) {
ntfs_error(vol->sb, "Failed to map "
"extent mft record "
"0x%lx of base inode "
"0x%lx.%s",
MREF_LE(al_entry->
mft_reference),
base_ni->mft_no, es);
err = PTR_ERR(ctx->mrec);
if (err == -ENOENT)
err = -EIO;
/* Cause @ctx to be sanitized below. */
ni = NULL;
break;
}
ctx->ntfs_ino = ni;
}
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
}
/*
* ctx->ntfs_ino, ctx->mrec, and ctx->attr now point to the
* mft record containing the attribute represented by the
* current al_entry.
*/
/*
* We could call into ntfs_attr_find() to find the right
* attribute in this mft record but this would be less
* efficient and not quite accurate as ntfs_attr_find() ignores
* the attribute instance numbers for example which become
* important when one plays with attribute lists. Also,
* because a proper match has been found in the attribute list
* entry above, the comparison can now be optimized. So it is
* worth re-implementing a simplified ntfs_attr_find() here.
*/
a = ctx->attr;
/*
* Use a manual loop so we can still use break and continue
* with the same meanings as above.
*/
do_next_attr_loop:
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated))
break;
if (a->type == AT_END)
break;
if (!a->length)
break;
if (al_entry->instance != a->instance)
goto do_next_attr;
/*
* If the type and/or the name are mismatched between the
* attribute list entry and the attribute record, there is
* corruption so we break and return error EIO.
*/
if (al_entry->type != a->type)
break;
if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
le16_to_cpu(a->name_offset)), a->name_length,
al_name, al_name_len, CASE_SENSITIVE,
vol->upcase, vol->upcase_len))
break;
ctx->attr = a;
/*
* If no @val specified or @val specified and it matches, we
* have found it!
*/
if (!val || (!a->non_resident && le32_to_cpu(
a->data.resident.value_length) == val_len &&
!memcmp((u8*)a +
le16_to_cpu(a->data.resident.value_offset),
val, val_len))) {
ntfs_debug("Done, found.");
return 0;
}
do_next_attr:
/* Proceed to the next attribute in the current mft record. */
a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
goto do_next_attr_loop;
}
if (!err) {
ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
"attribute list attribute.%s", base_ni->mft_no,
es);
err = -EIO;
}
if (ni != base_ni) {
if (ni)
unmap_extent_mft_record(ni);
ctx->ntfs_ino = base_ni;
ctx->mrec = ctx->base_mrec;
ctx->attr = ctx->base_attr;
}
if (err != -ENOMEM)
NVolSetErrors(vol);
return err;
not_found:
/*
* If we were looking for AT_END, we reset the search context @ctx and
* use ntfs_attr_find() to seek to the end of the base mft record.
*/
if (type == AT_END) {
ntfs_attr_reinit_search_ctx(ctx);
return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
ctx);
}
/*
* The attribute was not found. Before we return, we want to ensure
* @ctx->mrec and @ctx->attr indicate the position at which the
* attribute should be inserted in the base mft record. Since we also
* want to preserve @ctx->al_entry we cannot reinitialize the search
* context using ntfs_attr_reinit_search_ctx() as this would set
* @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
* ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
* @ctx->al_entry as the remaining fields (base_*) are identical to
* their non base_ counterparts and we cannot set @ctx->base_attr
* correctly yet as we do not know what @ctx->attr will be set to by
* the call to ntfs_attr_find() below.
*/
if (ni != base_ni)
unmap_extent_mft_record(ni);
ctx->mrec = ctx->base_mrec;
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
ctx->is_first = true;
ctx->ntfs_ino = base_ni;
ctx->base_ntfs_ino = NULL;
ctx->base_mrec = NULL;
ctx->base_attr = NULL;
/*
* In case there are multiple matches in the base mft record, need to
* keep enumerating until we get an attribute not found response (or
* another error), otherwise we would keep returning the same attribute
* over and over again and all programs using us for enumeration would
* lock up in a tight loop.
*/
do {
err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
} while (!err);
ntfs_debug("Done, not found.");
return err;
}
/**
* ntfs_attr_lookup - find an attribute in an ntfs inode
* @type: attribute type to find
* @name: attribute name to find (optional, i.e. NULL means don't care)
* @name_len: attribute name length (only needed if @name present)
* @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
* @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
* @val: attribute value to find (optional, resident attributes only)
* @val_len: attribute value length
* @ctx: search context with mft record and attribute to search from
*
* Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must
* be the base mft record and @ctx must have been obtained from a call to
* ntfs_attr_get_search_ctx().
*
* This function transparently handles attribute lists and @ctx is used to
* continue searches where they were left off.
*
* After finishing with the attribute/mft record you need to call
* ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
* mapped inodes, etc).
*
* Return 0 if the search was successful and -errno if not.
*
* When 0, @ctx->attr is the found attribute and it is in mft record
* @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is
* the attribute list entry of the found attribute.
*
* When -ENOENT, @ctx->attr is the attribute which collates just after the
* attribute being searched for, i.e. if one wants to add the attribute to the
* mft record this is the correct place to insert it into. If an attribute
* list attribute is present, @ctx->al_entry is the attribute list entry which
* collates just after the attribute list entry of the attribute being searched
* for, i.e. if one wants to add the attribute to the mft record this is the
* correct place to insert its attribute list entry into.
*
* When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
* then undefined and in particular you should not rely on it not changing.
*/
int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
const u32 name_len, const IGNORE_CASE_BOOL ic,
const VCN lowest_vcn, const u8 *val, const u32 val_len,
ntfs_attr_search_ctx *ctx)
{
ntfs_inode *base_ni;
ntfs_debug("Entering.");
BUG_ON(IS_ERR(ctx->mrec));
if (ctx->base_ntfs_ino)
base_ni = ctx->base_ntfs_ino;
else
base_ni = ctx->ntfs_ino;
/* Sanity check, just for debugging really. */
BUG_ON(!base_ni);
if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
return ntfs_attr_find(type, name, name_len, ic, val, val_len,
ctx);
return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
val, val_len, ctx);
}
/**
* ntfs_attr_init_search_ctx - initialize an attribute search context
* @ctx: attribute search context to initialize
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Initialize the attribute search context @ctx with @ni and @mrec.
*/
static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
ntfs_inode *ni, MFT_RECORD *mrec)
{
*ctx = (ntfs_attr_search_ctx) {
.mrec = mrec,
/* Sanity checks are performed elsewhere. */
.attr = (ATTR_RECORD*)((u8*)mrec +
le16_to_cpu(mrec->attrs_offset)),
.is_first = true,
.ntfs_ino = ni,
};
}
/**
* ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
* @ctx: attribute search context to reinitialize
*
* Reinitialize the attribute search context @ctx, unmapping an associated
* extent mft record if present, and initialize the search context again.
*
* This is used when a search for a new attribute is being started to reset
* the search context to the beginning.
*/
void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (likely(!ctx->base_ntfs_ino)) {
/* No attribute list. */
ctx->is_first = true;
/* Sanity checks are performed elsewhere. */
ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
le16_to_cpu(ctx->mrec->attrs_offset));
/*
* This needs resetting due to ntfs_external_attr_find() which
* can leave it set despite having zeroed ctx->base_ntfs_ino.
*/
ctx->al_entry = NULL;
return;
} /* Attribute list. */
if (ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
return;
}
/**
* ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
* @ni: ntfs inode with which to initialize the search context
* @mrec: mft record with which to initialize the search context
*
* Allocate a new attribute search context, initialize it with @ni and @mrec,
* and return it. Return NULL if allocation failed.
*/
ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
{
ntfs_attr_search_ctx *ctx;
ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
if (ctx)
ntfs_attr_init_search_ctx(ctx, ni, mrec);
return ctx;
}
/**
* ntfs_attr_put_search_ctx - release an attribute search context
* @ctx: attribute search context to free
*
* Release the attribute search context @ctx, unmapping an associated extent
* mft record if present.
*/
void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
{
if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
unmap_extent_mft_record(ctx->ntfs_ino);
kmem_cache_free(ntfs_attr_ctx_cache, ctx);
return;
}
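/*
 * Illustrative sketch (not part of the driver itself) of the search context
 * life cycle that ntfs_attr_get_search_ctx(), ntfs_attr_lookup(), and
 * ntfs_attr_put_search_ctx() are built around; on success @ctx->attr points
 * to the found attribute record. All local variable names (ni, m, ctx, err,
 * a) are hypothetical:
 *
 *	m = map_mft_record(ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	ctx = ntfs_attr_get_search_ctx(ni, m);
 *	if (!ctx) {
 *		unmap_mft_record(ni);
 *		return -ENOMEM;
 *	}
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	if (!err)
 *		a = ctx->attr;
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(ni);
 */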
#ifdef NTFS_RW
/**
* ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to find
*
* Search for the attribute definition record corresponding to the attribute
* @type in the $AttrDef system file.
*
* Return the attribute type definition record if found and NULL if not found.
*/
static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
const ATTR_TYPE type)
{
ATTR_DEF *ad;
BUG_ON(!vol->attrdef);
BUG_ON(!type);
for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
vol->attrdef_size && ad->type; ++ad) {
/* We have not found it yet, carry on searching. */
if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
continue;
/* We found the attribute; return it. */
if (likely(ad->type == type))
return ad;
/* We have gone too far already. No point in continuing. */
break;
}
/* Attribute not found. */
ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
le32_to_cpu(type));
return NULL;
}
/**
* ntfs_attr_size_bounds_check - check a size of an attribute type for validity
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
* @size: size which to check
*
* Check whether the @size in bytes is valid for an attribute of @type on the
* ntfs volume @vol. This information is obtained from the $AttrDef system file.
*
* Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
* listed in $AttrDef.
*/
int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
const s64 size)
{
ATTR_DEF *ad;
BUG_ON(size < 0);
/*
* $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
* listed in $AttrDef.
*/
if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
return -ERANGE;
/* Get the $AttrDef entry for the attribute @type. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Do the bounds check. */
if (((sle64_to_cpu(ad->min_size) > 0) &&
size < sle64_to_cpu(ad->min_size)) ||
((sle64_to_cpu(ad->max_size) > 0) && size >
sle64_to_cpu(ad->max_size)))
return -ERANGE;
return 0;
}
/**
* ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be non-resident. This information is obtained from the $AttrDef system file.
*
* Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
* -ENOENT if the attribute is not listed in $AttrDef.
*/
int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
ATTR_DEF *ad;
/* Find the attribute definition record in $AttrDef. */
ad = ntfs_attr_find_in_attrdef(vol, type);
if (unlikely(!ad))
return -ENOENT;
/* Check the flags and return the result. */
if (ad->flags & ATTR_DEF_RESIDENT)
return -EPERM;
return 0;
}
/**
* ntfs_attr_can_be_resident - check if an attribute can be resident
* @vol: ntfs volume to which the attribute belongs
* @type: attribute type which to check
*
* Check whether the attribute of @type on the ntfs volume @vol is allowed to
* be resident. This information is derived from our ntfs knowledge and may
* not be completely accurate, especially when user defined attributes are
* present. Basically we allow everything to be resident except for index
* allocation and $EA attributes.
*
* Return 0 if the attribute is allowed to be resident and -EPERM if not.
*
* Warning: In the system file $MFT the attribute $Bitmap must be non-resident
* otherwise windows will not boot (blue screen of death)! We cannot
* check for this here as we do not know which inode's $Bitmap is
* being asked about so the caller needs to special case this.
*/
int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
if (type == AT_INDEX_ALLOCATION)
return -EPERM;
return 0;
}
/**
* ntfs_attr_record_resize - resize an attribute record
* @m: mft record containing attribute record
* @a: attribute record to resize
* @new_size: new size in bytes to which to resize the attribute record @a
*
* Resize the attribute record @a, i.e. the resident part of the attribute, in
* the mft record @m to @new_size bytes.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
* Warning: If you make a record smaller without having copied all the data you
* are interested in, the data may be overwritten.
*/
int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
{
ntfs_debug("Entering for new_size %u.", new_size);
/* Align to 8 bytes if it is not already done. */
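/* For example, a hypothetical new_size of 0x2a would round up to 0x30. */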
if (new_size & 7)
new_size = (new_size + 7) & ~7;
/* If the actual attribute length has changed, move things around. */
if (new_size != le32_to_cpu(a->length)) {
u32 new_muse = le32_to_cpu(m->bytes_in_use) -
le32_to_cpu(a->length) + new_size;
/* Not enough space in this mft record. */
if (new_muse > le32_to_cpu(m->bytes_allocated))
return -ENOSPC;
/* Move attributes following @a to their new location. */
memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
le32_to_cpu(m->bytes_in_use) - ((u8*)a -
(u8*)m) - le32_to_cpu(a->length));
/* Adjust @m to reflect the change in used space. */
m->bytes_in_use = cpu_to_le32(new_muse);
/* Adjust @a to reflect the new size. */
if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
a->length = cpu_to_le32(new_size);
}
return 0;
}
/**
* ntfs_resident_attr_value_resize - resize the value of a resident attribute
* @m: mft record containing attribute record
* @a: attribute record whose value to resize
* @new_size: new size in bytes to which to resize the attribute value of @a
*
* Resize the value of the attribute @a in the mft record @m to @new_size bytes.
* If the value is made bigger, the newly allocated space is cleared.
*
* Return 0 on success and -errno on error. The following error codes are
* defined:
* -ENOSPC - Not enough space in the mft record @m to perform the resize.
*
* Note: On error, no modifications have been performed whatsoever.
*
* Warning: If you make a record smaller without having copied all the data you
* are interested in, the data may be overwritten.
*/
int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
const u32 new_size)
{
u32 old_size;
/* Resize the resident part of the attribute record. */
if (ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) + new_size))
return -ENOSPC;
/*
* The resize succeeded! If we made the attribute value bigger, clear
* the area between the old size and @new_size.
*/
old_size = le32_to_cpu(a->data.resident.value_length);
if (new_size > old_size)
memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
old_size, 0, new_size - old_size);
/* Finally update the length of the attribute value. */
a->data.resident.value_length = cpu_to_le32(new_size);
return 0;
}
/**
* ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
* @ni: ntfs inode describing the attribute to convert
* @data_size: size of the resident data to copy to the non-resident attribute
*
* Convert the resident ntfs attribute described by the ntfs inode @ni to a
* non-resident one.
*
* @data_size must be equal to the attribute value size. This is needed since
* we need to know the size before we can map the mft record and our callers
* always know it. The reason we cannot simply read the size from the vfs
* inode i_size is that this is not necessarily uptodate. This happens when
* ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
*
* Return 0 on success and -errno on error. The following error return codes
* are defined:
* -EPERM - The attribute is not allowed to be non-resident.
* -ENOMEM - Not enough memory.
* -ENOSPC - Not enough disk space.
* -EINVAL - Attribute not defined on the volume.
* -EIO - I/O error or other error.
* Note that -ENOSPC is also returned in the case that there is not enough
* space in the mft record to do the conversion. This can happen when the mft
* record is already very full. The caller is responsible for trying to make
* space in the mft record and trying again. FIXME: Do we need a separate
* error return code for this kind of -ENOSPC or is it always worth trying
* again in case the attribute may then fit in a resident state so no need to
* make it non-resident at all? Ho-hum... (AIA)
*
* NOTE to self: No changes in the attribute list are required to move from
* a resident to a non-resident attribute.
*
* Locking: - The caller must hold i_mutex on the inode.
*/
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
s64 new_size;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
struct page *page;
runlist_element *rl;
u8 *kaddr;
unsigned long flags;
int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
u32 attr_size;
u8 old_res_attr_flags;
/* Check that the attribute is allowed to be non-resident. */
err = ntfs_attr_can_be_non_resident(vol, ni->type);
if (unlikely(err)) {
if (err == -EPERM)
ntfs_debug("Attribute is not allowed to be "
"non-resident.");
else
ntfs_debug("Attribute not defined on the NTFS "
"volume!");
return err;
}
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
/*
* The size needs to be aligned to a cluster boundary for allocation
* purposes.
*/
new_size = (data_size + vol->cluster_size - 1) &
~(vol->cluster_size - 1);
if (new_size > 0) {
/*
* Will need the page later and since the page lock nests
* outside all ntfs locks, we need to get the page now.
*/
page = find_or_create_page(vi->i_mapping, 0,
mapping_gfp_mask(vi->i_mapping));
if (unlikely(!page))
return -ENOMEM;
/* Start by allocating clusters to hold the attribute value. */
rl = ntfs_cluster_alloc(vol, 0, new_size >>
vol->cluster_size_bits, -1, DATA_ZONE, true);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
ntfs_debug("Failed to allocate cluster%s, error code "
"%i.", (new_size >>
vol->cluster_size_bits) > 1 ? "s" : "",
err);
goto page_err_out;
}
} else {
rl = NULL;
page = NULL;
}
/* Determine the size of the mapping pairs array. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
if (unlikely(mp_size < 0)) {
err = mp_size;
ntfs_debug("Failed to get size for mapping pairs array, error "
"code %i.", err);
goto rl_err_out;
}
down_write(&ni->runlist.lock);
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
BUG_ON(NInoNonResident(ni));
BUG_ON(a->non_resident);
/*
* Calculate new offsets for the name and the mapping pairs array.
*/
if (NInoSparse(ni) || NInoCompressed(ni))
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) +
sizeof(a->data.non_resident.compressed_size) +
7) & ~7;
else
name_ofs = (offsetof(ATTR_REC,
data.non_resident.compressed_size) + 7) & ~7;
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
/*
* Determine the size of the resident part of the now non-resident
* attribute record.
*/
arec_size = (mp_ofs + mp_size + 7) & ~7;
/*
* If the page is not uptodate bring it uptodate by copying from the
* attribute value.
*/
attr_size = le32_to_cpu(a->data.resident.value_length);
BUG_ON(attr_size != data_size);
if (page && !PageUptodate(page)) {
kaddr = kmap_atomic(page);
memcpy(kaddr, (u8*)a +
le16_to_cpu(a->data.resident.value_offset),
attr_size);
memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
kunmap_atomic(kaddr);
flush_dcache_page(page);
SetPageUptodate(page);
}
/* Backup the attribute flag. */
old_res_attr_flags = a->data.resident.flags;
/* Resize the resident part of the attribute record. */
err = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err))
goto err_out;
/*
* Convert the resident part of the attribute record to describe a
* non-resident attribute.
*/
a->non_resident = 1;
/* Move the attribute name if it exists and update the offset. */
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
a->name_offset = cpu_to_le16(name_ofs);
/* Setup the fields specific to non-resident attributes. */
a->data.non_resident.lowest_vcn = 0;
a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
vol->cluster_size_bits);
a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
memset(&a->data.non_resident.reserved, 0,
sizeof(a->data.non_resident.reserved));
a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
a->data.non_resident.data_size =
a->data.non_resident.initialized_size =
cpu_to_sle64(attr_size);
if (NInoSparse(ni) || NInoCompressed(ni)) {
a->data.non_resident.compression_unit = 0;
if (NInoCompressed(ni) || vol->major_ver < 3)
a->data.non_resident.compression_unit = 4;
a->data.non_resident.compressed_size =
a->data.non_resident.allocated_size;
} else
a->data.non_resident.compression_unit = 0;
/* Generate the mapping pairs array into the attribute record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
arec_size - mp_ofs, rl, 0, -1, NULL);
if (unlikely(err)) {
ntfs_debug("Failed to build mapping pairs, error code %i.",
err);
goto undo_err_out;
}
/* Setup the in-memory attribute structure to be non-resident. */
ni->runlist.rl = rl;
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_size;
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size = ni->allocated_size;
if (a->data.non_resident.compression_unit) {
ni->itype.compressed.block_size = 1U << (a->data.
non_resident.compression_unit +
vol->cluster_size_bits);
ni->itype.compressed.block_size_bits =
ffs(ni->itype.compressed.block_size) -
1;
ni->itype.compressed.block_clusters = 1U <<
a->data.non_resident.compression_unit;
} else {
ni->itype.compressed.block_size = 0;
ni->itype.compressed.block_size_bits = 0;
ni->itype.compressed.block_clusters = 0;
}
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = ni->allocated_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
/*
* This needs to be last since the address space operations ->readpage
* and ->writepage can run concurrently with us as they are not
* serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
*/
NInoSetNonResident(ni);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
if (page) {
set_page_dirty(page);
unlock_page(page);
mark_page_accessed(page);
page_cache_release(page);
}
ntfs_debug("Done.");
return 0;
undo_err_out:
/* Convert the attribute back into a resident attribute. */
a->non_resident = 0;
/* Move the attribute name if it exists and update the offset. */
name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
sizeof(a->data.resident.reserved) + 7) & ~7;
if (a->name_length)
memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
a->name_length * sizeof(ntfschar));
mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
a->name_offset = cpu_to_le16(name_ofs);
arec_size = (mp_ofs + attr_size + 7) & ~7;
/* Resize the resident part of the attribute record. */
err2 = ntfs_attr_record_resize(m, a, arec_size);
if (unlikely(err2)) {
/*
* This cannot happen (well if memory corruption is at work it
* could happen in theory), but deal with it as well as we can.
* If the old size is too small, truncate the attribute,
* otherwise simply give it a larger allocated size.
* FIXME: Should check whether chkdsk complains when the
* allocated size is much bigger than the resident value size.
*/
arec_size = le32_to_cpu(a->length);
if ((mp_ofs + attr_size) > arec_size) {
err2 = attr_size;
attr_size = arec_size - mp_ofs;
ntfs_error(vol->sb, "Failed to undo partial resident "
"to non-resident attribute "
"conversion. Truncating inode 0x%lx, "
"attribute type 0x%x from %i bytes to "
"%i bytes to maintain metadata "
"consistency. THIS MEANS YOU ARE "
"LOSING %i BYTES DATA FROM THIS %s.",
vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err2, attr_size, err2 - attr_size,
((ni->type == AT_DATA) &&
!ni->name_len) ? "FILE": "ATTRIBUTE");
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = attr_size;
i_size_write(vi, attr_size);
write_unlock_irqrestore(&ni->size_lock, flags);
}
}
/* Setup the fields specific to resident attributes. */
a->data.resident.value_length = cpu_to_le32(attr_size);
a->data.resident.value_offset = cpu_to_le16(mp_ofs);
a->data.resident.flags = old_res_attr_flags;
memset(&a->data.resident.reserved, 0,
sizeof(a->data.resident.reserved));
/* Copy the data from the page back to the attribute value. */
if (page) {
kaddr = kmap_atomic(page);
memcpy((u8*)a + mp_ofs, kaddr, attr_size);
kunmap_atomic(kaddr);
}
/* Setup the allocated size in the ntfs inode in case it changed. */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = arec_size - mp_ofs;
write_unlock_irqrestore(&ni->size_lock, flags);
/* Mark the mft record dirty, so it gets written back. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
ni->runlist.rl = NULL;
up_write(&ni->runlist.lock);
rl_err_out:
if (rl) {
if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl);
page_err_out:
unlock_page(page);
page_cache_release(page);
}
if (err == -EINVAL)
err = -EIO;
return err;
}
/**
* ntfs_attr_extend_allocation - extend the allocated space of an attribute
* @ni: ntfs inode of the attribute whose allocation to extend
* @new_alloc_size: new size in bytes to which to extend the allocation to
* @new_data_size: new size in bytes to which to extend the data to
* @data_start: beginning of region which is required to be non-sparse
*
* Extend the allocated space of an attribute described by the ntfs inode @ni
* to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
* implemented as a hole in the file (as long as both the volume and the ntfs
* inode @ni have sparse support enabled). If @data_start is >= 0, then the
* region between the old allocated size and @data_start - 1 may be made sparse
* but the region between @data_start and @new_alloc_size must be backed by
* actual clusters.
*
* If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
* of the attribute is extended to @new_data_size. Note that the i_size of the
* vfs inode is not updated. Only the data size in the base attribute record
* is updated. The caller has to update i_size separately if this is required.
* WARNING: It is a BUG() for @new_data_size to be smaller than the old data
* size as well as for @new_data_size to be greater than @new_alloc_size.
*
* For resident attributes this involves resizing the attribute record and if
* necessary moving it and/or other attributes into extent mft records and/or
* converting the attribute to a non-resident attribute which in turn involves
* extending the allocation of a non-resident attribute as described below.
*
* For non-resident attributes this involves allocating clusters in the data
* zone on the volume (except for regions that are being made sparse) and
* extending the run list to describe the allocated clusters as well as
* updating the mapping pairs array of the attribute. This in turn involves
* resizing the attribute record and if necessary moving it and/or other
* attributes into extent mft records and/or splitting the attribute record
* into multiple extent attribute records.
*
* Also, the attribute list attribute is updated if present and in some of the
* above cases (the ones where extent mft records/attributes come into play),
* an attribute list attribute is created if not already present.
*
* Return the new allocated size on success and -errno on error. In the case
* that an error is encountered but a partial extension at least up to
* @data_start (if present) is possible, the allocation is partially extended
* and this is returned. This means the caller must check the returned size to
* determine if the extension was partial. If @data_start is -1 then partial
* allocations are not performed.
*
* WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
*
* Locking: This function takes the runlist lock of @ni for writing as well as
* locking the mft record of the base ntfs inode. These locks are maintained
* throughout execution of the function. These locks are required so that the
* attribute can be resized safely and so that it can for example be converted
* from resident to non-resident safely.
*
* TODO: At present attribute list attribute handling is not implemented.
*
* TODO: At present it is not safe to call this function for anything other
* than the $DATA attribute(s) of an uncompressed and unencrypted file.
*/
s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
const s64 new_data_size, const s64 data_start)
{
VCN vcn;
s64 ll, allocated_size, start = data_start;
struct inode *vi = VFS_I(ni);
ntfs_volume *vol = ni->vol;
ntfs_inode *base_ni;
MFT_RECORD *m;
ATTR_RECORD *a;
ntfs_attr_search_ctx *ctx;
runlist_element *rl, *rl2;
unsigned long flags;
int err, mp_size;
u32 attr_len = 0; /* Silence stupid gcc warning. */
bool mp_rebuilt;
#ifdef DEBUG
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
"old_allocated_size 0x%llx, "
"new_allocated_size 0x%llx, new_data_size 0x%llx, "
"data_start 0x%llx.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
(unsigned long long)allocated_size,
(unsigned long long)new_alloc_size,
(unsigned long long)new_data_size,
(unsigned long long)start);
#endif
retry_extend:
/*
* For non-resident attributes, @start and @new_size need to be aligned
* to cluster boundaries for allocation purposes.
*/
if (NInoNonResident(ni)) {
if (start > 0)
start &= ~(s64)vol->cluster_size_mask;
new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
~(s64)vol->cluster_size_mask;
}
BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
/* Check if new size is allowed in $AttrDef. */
err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
if (unlikely(err)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ERANGE) {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the new "
"allocation would exceed the "
"maximum allowed size for "
"this attribute type.",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
} else {
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because this "
"attribute type is not "
"defined on the NTFS volume. "
"Possible corruption! You "
"should run chkdsk!",
vi->i_ino, (unsigned)
le32_to_cpu(ni->type));
}
}
/* Translate error code to be POSIX conformant for write(2). */
if (err == -ERANGE)
err = -EFBIG;
else
err = -EIO;
return err;
}
if (!NInoAttr(ni))
base_ni = ni;
else
base_ni = ni->ext.base_ntfs_ino;
/*
* We will be modifying both the runlist (if non-resident) and the mft
* record so lock them both down.
*/
down_write(&ni->runlist.lock);
m = map_mft_record(base_ni);
if (IS_ERR(m)) {
err = PTR_ERR(m);
m = NULL;
ctx = NULL;
goto err_out;
}
ctx = ntfs_attr_get_search_ctx(base_ni, m);
if (unlikely(!ctx)) {
err = -ENOMEM;
goto err_out;
}
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
/*
* If non-resident, seek to the last extent. If resident, there is
* only one extent, so seek to that.
*/
vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
0;
/*
* Abort if someone did the work whilst we waited for the locks. If we
* just converted the attribute from resident to non-resident it is
* likely that exactly this has happened already. We cannot quite
* abort if we need to update the data size.
*/
if (unlikely(new_alloc_size <= allocated_size)) {
ntfs_debug("Allocated size already exceeds requested size.");
new_alloc_size = allocated_size;
if (new_data_size < 0)
goto done;
/*
* We want the first attribute extent so that we can update the
* data size.
*/
vcn = 0;
}
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, vcn, NULL, 0, ctx);
if (unlikely(err)) {
if (err == -ENOENT)
err = -EIO;
goto err_out;
}
m = ctx->mrec;
a = ctx->attr;
/* Use goto to reduce indentation. */
if (a->non_resident)
goto do_non_resident_extend;
BUG_ON(NInoNonResident(ni));
/* The total length of the attribute value. */
attr_len = le32_to_cpu(a->data.resident.value_length);
/*
* Extend the attribute record to be able to store the new attribute
* size. ntfs_attr_record_resize() will not do anything if the size is
* not changing.
*/
if (new_alloc_size < vol->mft_record_size &&
!ntfs_attr_record_resize(m, a,
le16_to_cpu(a->data.resident.value_offset) +
new_alloc_size)) {
/* The resize succeeded! */
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = le32_to_cpu(a->length) -
le16_to_cpu(a->data.resident.value_offset);
write_unlock_irqrestore(&ni->size_lock, flags);
if (new_data_size >= 0) {
BUG_ON(new_data_size < attr_len);
a->data.resident.value_length =
cpu_to_le32((u32)new_data_size);
}
goto flush_done;
}
/*
* We have to drop all the locks so we can call
* ntfs_attr_make_non_resident(). This could be optimised by try-
* locking the first page cache page and only if that fails dropping
* the locks, locking the page, and redoing all the locking and
* lookups. While this would be a huge optimisation, it is not worth
* it as this is definitely a slow code path.
*/
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* Not enough space in the mft record, try to make the attribute
* non-resident and if successful restart the extension process.
*/
err = ntfs_attr_make_non_resident(ni, attr_len);
if (likely(!err))
goto retry_extend;
/*
* Could not make non-resident. If this is due to this not being
* permitted for this attribute type or there not being enough space,
* try to make other attributes non-resident. Otherwise fail.
*/
if (unlikely(err != -EPERM && err != -ENOSPC)) {
/* Only emit errors when the write will fail completely. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the conversion from resident "
"to non-resident attribute failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
goto conv_err_out;
}
/* TODO: Not implemented from here, abort. */
read_lock_irqsave(&ni->size_lock, flags);
allocated_size = ni->allocated_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (start < 0 || start >= allocated_size) {
if (err == -ENOSPC)
ntfs_error(vol->sb, "Not enough space in the mft "
"record/on disk for the non-resident "
"attribute value. This case is not "
"implemented yet.");
else /* if (err == -EPERM) */
ntfs_error(vol->sb, "This attribute type may not be "
"non-resident. This case is not "
"implemented yet.");
}
err = -EOPNOTSUPP;
goto conv_err_out;
#if 0
// TODO: Attempt to make other attributes non-resident.
if (!err)
goto do_resident_extend;
/*
* Both the attribute list attribute and the standard information
* attribute must remain in the base inode. Thus, if this is one of
* these attributes, we have to try to move other attributes out into
* extent mft records instead.
*/
if (ni->type == AT_ATTRIBUTE_LIST ||
ni->type == AT_STANDARD_INFORMATION) {
// TODO: Attempt to move other attributes into extent mft
// records.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
goto err_out;
}
// TODO: Attempt to move this attribute to an extent mft record, but
// only if it is not already the only attribute in an mft record in
// which case there would be nothing to gain.
err = -EOPNOTSUPP;
if (!err)
goto do_resident_extend;
/* There is nothing we can do to make enough space. )-: */
goto err_out;
#endif
do_non_resident_extend:
BUG_ON(!NInoNonResident(ni));
if (new_alloc_size == allocated_size) {
BUG_ON(vcn);
goto alloc_done;
}
/*
* If the data starts after the end of the old allocation, this is a
* $DATA attribute and sparse attributes are enabled on the volume and
* for this inode, then create a sparse region between the old
* allocated size and the start of the data. Otherwise simply proceed
* with filling the whole space between the old allocated size and the
* new allocated size with clusters.
*/
if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
goto skip_sparse;
// TODO: This is not implemented yet. We just fill in with real
// clusters for now...
ntfs_debug("Inserting holes is not-implemented yet. Falling back to "
"allocating real clusters instead.");
skip_sparse:
rl = ni->runlist.rl;
if (likely(rl)) {
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/* If this attribute extent is not mapped, map it now. */
if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
if (!rl && !allocated_size)
goto first_alloc;
rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation "
"of inode 0x%lx, attribute "
"type 0x%x, because the "
"mapping of a runlist "
"fragment failed with error "
"code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type),
err);
if (err != -ENOMEM)
err = -EIO;
goto err_out;
}
ni->runlist.rl = rl;
/* Seek to the end of the runlist. */
while (rl->length)
rl++;
}
/*
* We now know the runlist of the last extent is mapped and @rl is at
* the end of the runlist. We want to begin allocating clusters
* starting at the last allocated cluster to reduce fragmentation. If
* there are no valid LCNs in the attribute we let the cluster
* allocator choose the starting cluster.
*/
/* If the last LCN is a hole or similar, seek back to the last real LCN. */
while (rl->lcn < 0 && rl > ni->runlist.rl)
rl--;
first_alloc:
// FIXME: Need to implement partial allocations so at least part of the
// write can be performed when start >= 0. (Needed for POSIX write(2)
// conformance.)
rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
(new_alloc_size - allocated_size) >>
vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
rl->lcn + rl->length : -1, DATA_ZONE, true);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the allocation of clusters "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM && err != -ENOSPC)
err = -EIO;
goto err_out;
}
rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
if (IS_ERR(rl)) {
err = PTR_ERR(rl);
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because the runlist merge failed "
"with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err != -ENOMEM)
err = -EIO;
if (ntfs_cluster_free_from_rl(vol, rl2)) {
ntfs_error(vol->sb, "Failed to release allocated "
"cluster(s) in error code path. Run "
"chkdsk to recover the lost "
"cluster(s).");
NVolSetErrors(vol);
}
ntfs_free(rl2);
goto err_out;
}
ni->runlist.rl = rl;
ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
allocated_size) >> vol->cluster_size_bits);
/* Find the runlist element with which the attribute extent starts. */
ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
BUG_ON(!rl2);
BUG_ON(!rl2->length);
BUG_ON(rl2->lcn < LCN_HOLE);
mp_rebuilt = false;
/* Get the size for the new mapping pairs array for this extent. */
mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
if (unlikely(mp_size <= 0)) {
err = mp_size;
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because determining the size for the "
"mapping pairs failed with error code "
"%i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Extend the attribute record to fit the bigger mapping pairs array. */
attr_len = le32_to_cpu(a->length);
err = ntfs_attr_record_resize(m, a, mp_size +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
if (unlikely(err)) {
BUG_ON(err != -ENOSPC);
// TODO: Deal with this by moving this extent to a new mft
// record or by starting a new extent in a new mft record,
// possibly by extending this extent partially and filling it
// and creating a new extent for the remainder, or by making
// other attributes non-resident and/or by moving other
// attributes out of this mft record.
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Not enough space in the mft "
"record for the extended attribute "
"record. This case is not "
"implemented yet.");
err = -EOPNOTSUPP;
goto undo_alloc;
}
mp_rebuilt = true;
/* Generate the mapping pairs array directly into the attr record. */
err = ntfs_mapping_pairs_build(vol, (u8*)a +
le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
mp_size, rl2, ll, -1, NULL);
if (unlikely(err)) {
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot extend allocation of "
"inode 0x%lx, attribute type 0x%x, "
"because building the mapping pairs "
"failed with error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
err = -EIO;
goto undo_alloc;
}
/* Update the highest_vcn. */
a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
vol->cluster_size_bits) - 1);
/*
* We now have extended the allocated size of the attribute. Reflect
* this in the ntfs_inode structure and the attribute record.
*/
if (a->data.non_resident.lowest_vcn) {
/*
* We are not in the first attribute extent, switch to it, but
* first ensure the changes will make it to disk later.
*/
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
ntfs_attr_reinit_search_ctx(ctx);
err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
CASE_SENSITIVE, 0, NULL, 0, ctx);
if (unlikely(err))
goto restore_undo_alloc;
/* @m is not used any more so no need to set it. */
a = ctx->attr;
}
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
/*
* FIXME: This would fail if @ni is a directory, $MFT, or an index,
* since those can have sparse/compressed set. For example, a directory can be
* set compressed even though it is not compressed itself and in that
* case the bit means that files are to be created compressed in the
* directory... At present this is ok as this code is only called for
* regular files, and only for their $DATA attribute(s).
* FIXME: The calculation is wrong if we created a hole above. For now
* it does not matter as we never create holes.
*/
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size - allocated_size;
a->data.non_resident.compressed_size =
cpu_to_sle64(ni->itype.compressed.size);
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
alloc_done:
if (new_data_size >= 0) {
BUG_ON(new_data_size <
sle64_to_cpu(a->data.non_resident.data_size));
a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
}
flush_done:
/* Ensure the changes make it to disk. */
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
done:
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
ntfs_debug("Done, new_allocated_size 0x%llx.",
(unsigned long long)new_alloc_size);
return new_alloc_size;
restore_undo_alloc:
if (start < 0 || start >= allocated_size)
ntfs_error(vol->sb, "Cannot complete extension of allocation "
"of inode 0x%lx, attribute type 0x%x, because "
"lookup of first attribute extent failed with "
"error code %i.", vi->i_ino,
(unsigned)le32_to_cpu(ni->type), err);
if (err == -ENOENT)
err = -EIO;
ntfs_attr_reinit_search_ctx(ctx);
if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
allocated_size >> vol->cluster_size_bits, NULL, 0,
ctx)) {
ntfs_error(vol->sb, "Failed to find last attribute extent of "
"attribute in error code path. Run chkdsk to "
"recover.");
write_lock_irqsave(&ni->size_lock, flags);
ni->allocated_size = new_alloc_size;
/*
* FIXME: This would fail if @ni is a directory... See above.
* FIXME: The calculation is wrong if we created a hole above.
* For now it does not matter as we never create holes.
*/
if (NInoSparse(ni) || NInoCompressed(ni)) {
ni->itype.compressed.size += new_alloc_size -
allocated_size;
vi->i_blocks = ni->itype.compressed.size >> 9;
} else
vi->i_blocks = new_alloc_size >> 9;
write_unlock_irqrestore(&ni->size_lock, flags);
ntfs_attr_put_search_ctx(ctx);
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
/*
* The only thing that is now wrong is the allocated size of the
* base attribute extent which chkdsk should be able to fix.
*/
NVolSetErrors(vol);
return err;
}
ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
(allocated_size >> vol->cluster_size_bits) - 1);
undo_alloc:
ll = allocated_size >> vol->cluster_size_bits;
if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
"in error code path. Run chkdsk to recover "
"the lost cluster(s).");
NVolSetErrors(vol);
}
m = ctx->mrec;
a = ctx->attr;
/*
* If the runlist truncation fails and/or the search context is no
* longer valid, we cannot resize the attribute record or build the
* mapping pairs array thus we mark the inode bad so that no access to
* the freed clusters can happen.
*/
if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
ntfs_error(vol->sb, "Failed to %s in error code path. Run "
"chkdsk to recover.", IS_ERR(m) ?
"restore attribute search context" :
"truncate attribute runlist");
NVolSetErrors(vol);
} else if (mp_rebuilt) {
if (ntfs_attr_record_resize(m, a, attr_len)) {
ntfs_error(vol->sb, "Failed to restore attribute "
"record in error code path. Run "
"chkdsk to recover.");
NVolSetErrors(vol);
} else /* if (success) */ {
if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.
mapping_pairs_offset), attr_len -
le16_to_cpu(a->data.non_resident.
mapping_pairs_offset), rl2, ll, -1,
NULL)) {
ntfs_error(vol->sb, "Failed to restore "
"mapping pairs array in error "
"code path. Run chkdsk to "
"recover.");
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
mark_mft_record_dirty(ctx->ntfs_ino);
}
}
err_out:
if (ctx)
ntfs_attr_put_search_ctx(ctx);
if (m)
unmap_mft_record(base_ni);
up_write(&ni->runlist.lock);
conv_err_out:
ntfs_debug("Failed. Returning error code %i.", err);
return err;
}
/**
* ntfs_attr_set - fill (a part of) an attribute with a byte
* @ni: ntfs inode describing the attribute to fill
* @ofs: offset inside the attribute at which to start to fill
* @cnt: number of bytes to fill
* @val: the unsigned 8-bit value with which to fill the attribute
*
* Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
* byte offset @ofs inside the attribute with the constant byte @val.
*
* This function is effectively like memset() applied to an ntfs attribute.
* Note this function actually only operates on the page cache pages belonging
* to the ntfs attribute and it marks them dirty after doing the memset().
* Thus it relies on the vm dirty page write code paths to cause the modified
* pages to be written to the mft record/disk.
*
* Return 0 on success and -errno on error. An error code of -ESPIPE means
* that @ofs + @cnt were outside the end of the attribute and no write was
* performed.
*/
int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
{
ntfs_volume *vol = ni->vol;
struct address_space *mapping;
struct page *page;
u8 *kaddr;
pgoff_t idx, end;
unsigned start_ofs, end_ofs, size;
ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
(long long)ofs, (long long)cnt, val);
BUG_ON(ofs < 0);
BUG_ON(cnt < 0);
if (!cnt)
goto done;
/*
* FIXME: Compressed and encrypted attributes are not supported when
* writing and we should never have gotten here for them.
*/
BUG_ON(NInoCompressed(ni));
BUG_ON(NInoEncrypted(ni));
mapping = VFS_I(ni)->i_mapping;
/* Work out the starting index and page offset. */
idx = ofs >> PAGE_CACHE_SHIFT;
start_ofs = ofs & ~PAGE_CACHE_MASK;
/* Work out the ending index and page offset. */
end = ofs + cnt;
end_ofs = end & ~PAGE_CACHE_MASK;
/* If the end is outside the inode size return -ESPIPE. */
if (unlikely(end > i_size_read(VFS_I(ni)))) {
ntfs_error(vol->sb, "Request exceeds end of attribute.");
return -ESPIPE;
}
end >>= PAGE_CACHE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (error, index 0x%lx).", idx);
return PTR_ERR(page);
}
/*
* If the last page is the same as the first page, need to
* limit the write to the end offset.
*/
size = PAGE_CACHE_SIZE;
if (idx == end)
size = end_ofs;
kaddr = kmap_atomic(page);
memset(kaddr + start_ofs, val, size - start_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
if (idx == end)
goto done;
idx++;
}
/* Do the whole pages the fast way. */
for (; idx < end; idx++) {
/* Find or create the current page. (The page is locked.) */
page = grab_cache_page(mapping, idx);
if (unlikely(!page)) {
ntfs_error(vol->sb, "Insufficient memory to grab "
"page (index 0x%lx).", idx);
return -ENOMEM;
}
kaddr = kmap_atomic(page);
memset(kaddr, val, PAGE_CACHE_SIZE);
flush_dcache_page(page);
kunmap_atomic(kaddr);
/*
* If the page has buffers, mark them uptodate since buffer
* state and not page state is definitive in 2.6 kernels.
*/
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
bh = head = page_buffers(page);
do {
set_buffer_uptodate(bh);
} while ((bh = bh->b_this_page) != head);
}
/* Now that buffers are uptodate, set the page uptodate, too. */
SetPageUptodate(page);
/*
* Set the page and all its buffers dirty and mark the inode
* dirty, too. The VM will write the page later on.
*/
set_page_dirty(page);
/* Finally unlock and release the page. */
unlock_page(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(error, index 0x%lx).", idx);
return PTR_ERR(page);
}
kaddr = kmap_atomic(page);
memset(kaddr, val, end_ofs);
flush_dcache_page(page);
kunmap_atomic(kaddr);
set_page_dirty(page);
page_cache_release(page);
balance_dirty_pages_ratelimited(mapping);
cond_resched();
}
done:
ntfs_debug("Done.");
return 0;
}
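/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * zeroing @len bytes of the attribute described by @ni starting at byte
 * offset @pos, subject to the constraints documented above (the attribute
 * must not be compressed or encrypted and the range must lie within the
 * inode size), would look like:
 *
 *	err = ntfs_attr_set(ni, pos, len, 0);
 *	if (unlikely(err))
 *		return err;	(-ESPIPE: pos + len was past the attribute end)
 */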
#endif /* NTFS_RW */
| gpl-2.0 |
denghongcai/rk3x_kernel_3.0.36 | kernel/time/timecompare.c | 7932 | 4966 | /*
* Copyright (C) 2009 Intel Corporation.
* Author: Patrick Ohly <patrick.ohly@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/timecompare.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/kernel.h>
/*
* fixed point arithmetic scale factor for skew
*
* Usually one would measure skew in ppb (parts per billion, 1e9), but
* using a factor of 2 simplifies the math.
*/
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
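/*
 * For example, timecompare_transform() below applies the skew as
 *	nsec += (source_tstamp - last_update) * skew / 2^30
 * so a skew of 2^30 would mean one nanosecond of correction per nanosecond
 * of elapsed source time; a skew of roughly 1074 (about 2^30 / 10^6)
 * corresponds to ~1 ppm, i.e. about 1 us of adjustment per second.
 */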
ktime_t timecompare_transform(struct timecompare *sync,
u64 source_tstamp)
{
u64 nsec;
nsec = source_tstamp + sync->offset;
nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
TIMECOMPARE_SKEW_RESOLUTION;
return ns_to_ktime(nsec);
}
EXPORT_SYMBOL_GPL(timecompare_transform);
int timecompare_offset(struct timecompare *sync,
s64 *offset,
u64 *source_tstamp)
{
u64 start_source = 0, end_source = 0;
struct {
s64 offset;
s64 duration_target;
} buffer[10], sample, *samples;
int counter = 0, i;
int used;
int index;
int num_samples = sync->num_samples;
if (num_samples > ARRAY_SIZE(buffer)) {
samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
if (!samples) {
samples = buffer;
num_samples = ARRAY_SIZE(buffer);
}
} else {
samples = buffer;
}
/* run until we have enough valid samples, but do not try forever */
i = 0;
counter = 0;
while (1) {
u64 ts;
ktime_t start, end;
start = sync->target();
ts = timecounter_read(sync->source);
end = sync->target();
if (!i)
start_source = ts;
/* ignore negative durations */
sample.duration_target = ktime_to_ns(ktime_sub(end, start));
if (sample.duration_target >= 0) {
/*
* assume symmetric delay to and from source:
* average target time corresponds to measured
* source time
*/
sample.offset =
(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
ts;
/* simple insertion sort based on duration */
index = counter - 1;
while (index >= 0) {
if (samples[index].duration_target <
sample.duration_target)
break;
samples[index + 1] = samples[index];
index--;
}
samples[index + 1] = sample;
counter++;
}
i++;
if (counter >= num_samples || i >= 100000) {
end_source = ts;
break;
}
}
*source_tstamp = (end_source + start_source) / 2;
/* remove outliers by only using 75% of the samples */
used = counter * 3 / 4;
if (!used)
used = counter;
if (used) {
/* calculate average */
s64 off = 0;
for (index = 0; index < used; index++)
off += samples[index].offset;
*offset = div_s64(off, used);
}
if (samples && samples != buffer)
kfree(samples);
return used;
}
EXPORT_SYMBOL_GPL(timecompare_offset);
void __timecompare_update(struct timecompare *sync,
u64 source_tstamp)
{
s64 offset;
u64 average_time;
if (!timecompare_offset(sync, &offset, &average_time))
return;
if (!sync->last_update) {
sync->last_update = average_time;
sync->offset = offset;
sync->skew = 0;
} else {
s64 delta_nsec = average_time - sync->last_update;
/* avoid division by negative or small deltas */
if (delta_nsec >= 10000) {
s64 delta_offset_nsec = offset - sync->offset;
s64 skew; /* delta_offset_nsec *
TIMECOMPARE_SKEW_RESOLUTION /
delta_nsec */
u64 divisor;
/* div_s64() is limited to 32 bit divisor */
skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
divisor = delta_nsec;
while (unlikely(divisor >= ((s64)1) << 32)) {
/* divide both by 2; beware, right shift
of negative value has undefined
behavior and can only be used for
the positive divisor */
skew = div_s64(skew, 2);
divisor >>= 1;
}
skew = div_s64(skew, divisor);
/*
* Calculate new overall skew as 4/16 the
* old value and 12/16 the new one. This is
* a rather arbitrary tradeoff between
* only using the latest measurement (0/16 and
* 16/16) and even more weight on past measurements.
*/
#define TIMECOMPARE_NEW_SKEW_PER_16 12
sync->skew =
div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
sync->skew +
TIMECOMPARE_NEW_SKEW_PER_16 * skew,
16);
sync->last_update = average_time;
sync->offset = offset;
}
}
}
EXPORT_SYMBOL_GPL(__timecompare_update);
| gpl-2.0 |
Lenovo-K3/android_kernel_lenovo_msm8916 | drivers/rapidio/switches/tsi500.c | 11516 | 2485 | /*
* RapidIO Tsi500 switch support
*
* Copyright 2009-2010 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
* - Modified switch operations initialization.
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include "../rio.h"
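/*
 * The Tsi500 routing registers pack eight 4-bit egress-port entries per
 * 32-bit register (the nibble is selected by route_destid & 0x7), and the
 * per-port copies of the table are spaced 0x20000 apart. A table value of
 * 0xff is treated as "global": the entry is written to all four per-port
 * tables.
 */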
static int
tsi500_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port)
{
int i;
u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3);
u32 result;
if (table == 0xff) {
rio_mport_read_config_32(mport, destid, hopcount, offset, &result);
result &= ~(0xf << (4*(route_destid & 0x7)));
for (i=0;i<4;i++)
rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*i), result | (route_port << (4*(route_destid & 0x7))));
}
else {
rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result);
result &= ~(0xf << (4*(route_destid & 0x7)));
rio_mport_write_config_32(mport, destid, hopcount, offset + (0x20000*table), result | (route_port << (4*(route_destid & 0x7))));
}
return 0;
}
static int
tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 *route_port)
{
int ret = 0;
u32 offset = 0x10000 + 0xa00 + ((route_destid / 2)&~0x3);
u32 result;
if (table == 0xff)
rio_mport_read_config_32(mport, destid, hopcount, offset, &result);
else
rio_mport_read_config_32(mport, destid, hopcount, offset + (0x20000*table), &result);
result &= 0xf << (4*(route_destid & 0x7));
*route_port = result >> (4*(route_destid & 0x7));
if (*route_port > 3)
ret = -1;
return ret;
}
static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
rdev->rswitch->add_entry = tsi500_route_add_entry;
rdev->rswitch->get_entry = tsi500_route_get_entry;
rdev->rswitch->clr_table = NULL;
rdev->rswitch->set_domain = NULL;
rdev->rswitch->get_domain = NULL;
rdev->rswitch->em_init = NULL;
rdev->rswitch->em_handle = NULL;
return 0;
}
DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
| gpl-2.0 |
getitnowmarketing/Gz-One-Commando | crypto/cbc.c | 11516 | 7621 | /*
* CBC: Cipher Block Chaining mode
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_cbc_ctx {
struct crypto_cipher *child;
};
static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
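/*
 * The segment/inplace helpers below implement the standard CBC recurrence:
 *	C[0] = E_K(P[0] XOR IV), C[i] = E_K(P[i] XOR C[i-1])
 * for encryption, and
 *	P[i] = D_K(C[i]) XOR C[i-1] (with C[-1] = IV)
 * for decryption. The last ciphertext block is copied back into walk->iv so
 * that chaining continues correctly across blkcipher_walk iterations.
 */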
static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
fn(crypto_cipher_tfm(tfm), dst, iv);
memcpy(iv, dst, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
do {
crypto_xor(src, iv, bsize);
fn(crypto_cipher_tfm(tfm), src, src);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
else
nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
crypto_xor(dst, iv, bsize);
iv = src;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
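/*
 * For in-place decryption the walk runs backwards from the last full block
 * so that each ciphertext block is still available to XOR into the block
 * that follows it once that block has been decrypted; the saved copy of the
 * final ciphertext block (last_iv) becomes the chaining value for the next
 * walk segment.
 */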
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 last_iv[bsize];
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
memcpy(last_iv, src, bsize);
for (;;) {
fn(crypto_cipher_tfm(tfm), src, src);
if ((nbytes -= bsize) < bsize)
break;
crypto_xor(src, src - bsize, bsize);
src -= bsize;
}
crypto_xor(src, walk->iv, bsize);
memcpy(walk->iv, last_iv, bsize);
return nbytes;
}
static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
else
nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
if (err)
return ERR_PTR(err);
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
if (!is_power_of_2(alg->cra_blocksize))
goto out_put_alg;
inst = crypto_alloc_instance("cbc", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
/* We access the data as u32s when xoring. */
inst->alg.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
inst->alg.cra_init = crypto_cbc_init_tfm;
inst->alg.cra_exit = crypto_cbc_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_cbc_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_cbc_tmpl = {
.name = "cbc",
.alloc = crypto_cbc_alloc,
.free = crypto_cbc_free,
.module = THIS_MODULE,
};
static int __init crypto_cbc_module_init(void)
{
return crypto_register_template(&crypto_cbc_tmpl);
}
static void __exit crypto_cbc_module_exit(void)
{
crypto_unregister_template(&crypto_cbc_tmpl);
}
module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");
| gpl-2.0 |
Hui-Zhi/gpu_cgroup_kernel | sound/soc/codecs/wm8974.c | 253 | 18758 | /*
* wm8974.c -- WM8974 ALSA Soc Audio driver
*
* Copyright 2006-2009 Wolfson Microelectronics PLC.
*
* Author: Liam Girdwood <Liam.Girdwood@wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8974.h"
static const struct reg_default wm8974_reg_defaults[] = {
{ 0, 0x0000 }, { 1, 0x0000 }, { 2, 0x0000 }, { 3, 0x0000 },
{ 4, 0x0050 }, { 5, 0x0000 }, { 6, 0x0140 }, { 7, 0x0000 },
{ 8, 0x0000 }, { 9, 0x0000 }, { 10, 0x0000 }, { 11, 0x00ff },
{ 12, 0x0000 }, { 13, 0x0000 }, { 14, 0x0100 }, { 15, 0x00ff },
{ 16, 0x0000 }, { 17, 0x0000 }, { 18, 0x012c }, { 19, 0x002c },
{ 20, 0x002c }, { 21, 0x002c }, { 22, 0x002c }, { 23, 0x0000 },
{ 24, 0x0032 }, { 25, 0x0000 }, { 26, 0x0000 }, { 27, 0x0000 },
{ 28, 0x0000 }, { 29, 0x0000 }, { 30, 0x0000 }, { 31, 0x0000 },
{ 32, 0x0038 }, { 33, 0x000b }, { 34, 0x0032 }, { 35, 0x0000 },
{ 36, 0x0008 }, { 37, 0x000c }, { 38, 0x0093 }, { 39, 0x00e9 },
{ 40, 0x0000 }, { 41, 0x0000 }, { 42, 0x0000 }, { 43, 0x0000 },
{ 44, 0x0003 }, { 45, 0x0010 }, { 46, 0x0000 }, { 47, 0x0000 },
{ 48, 0x0000 }, { 49, 0x0002 }, { 50, 0x0000 }, { 51, 0x0000 },
{ 52, 0x0000 }, { 53, 0x0000 }, { 54, 0x0039 }, { 55, 0x0000 },
{ 56, 0x0000 },
};
#define WM8974_POWER1_BIASEN 0x08
#define WM8974_POWER1_BUFIOEN 0x04
#define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0)
static const char *wm8974_companding[] = {"Off", "NC", "u-law", "A-law" };
static const char *wm8974_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz" };
static const char *wm8974_eqmode[] = {"Capture", "Playback" };
static const char *wm8974_bw[] = {"Narrow", "Wide" };
static const char *wm8974_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz" };
static const char *wm8974_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz" };
static const char *wm8974_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz" };
static const char *wm8974_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz" };
static const char *wm8974_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz" };
static const char *wm8974_alc[] = {"ALC", "Limiter" };
static const struct soc_enum wm8974_enum[] = {
SOC_ENUM_SINGLE(WM8974_COMP, 1, 4, wm8974_companding), /* adc */
SOC_ENUM_SINGLE(WM8974_COMP, 3, 4, wm8974_companding), /* dac */
SOC_ENUM_SINGLE(WM8974_DAC, 4, 4, wm8974_deemp),
SOC_ENUM_SINGLE(WM8974_EQ1, 8, 2, wm8974_eqmode),
SOC_ENUM_SINGLE(WM8974_EQ1, 5, 4, wm8974_eq1),
SOC_ENUM_SINGLE(WM8974_EQ2, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ2, 5, 4, wm8974_eq2),
SOC_ENUM_SINGLE(WM8974_EQ3, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ3, 5, 4, wm8974_eq3),
SOC_ENUM_SINGLE(WM8974_EQ4, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ4, 5, 4, wm8974_eq4),
SOC_ENUM_SINGLE(WM8974_EQ5, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ5, 5, 4, wm8974_eq5),
SOC_ENUM_SINGLE(WM8974_ALC3, 8, 2, wm8974_alc),
};
static const char *wm8974_auxmode_text[] = { "Buffer", "Mixer" };
static SOC_ENUM_SINGLE_DECL(wm8974_auxmode,
WM8974_INPUT, 3, wm8974_auxmode_text);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
static const struct snd_kcontrol_new wm8974_snd_controls[] = {
SOC_SINGLE("Digital Loopback Switch", WM8974_COMP, 0, 1, 0),
SOC_ENUM("DAC Companding", wm8974_enum[1]),
SOC_ENUM("ADC Companding", wm8974_enum[0]),
SOC_ENUM("Playback De-emphasis", wm8974_enum[2]),
SOC_SINGLE("DAC Inversion Switch", WM8974_DAC, 0, 1, 0),
SOC_SINGLE_TLV("PCM Volume", WM8974_DACVOL, 0, 255, 0, digital_tlv),
SOC_SINGLE("High Pass Filter Switch", WM8974_ADC, 8, 1, 0),
SOC_SINGLE("High Pass Cut Off", WM8974_ADC, 4, 7, 0),
SOC_SINGLE("ADC Inversion Switch", WM8974_ADC, 0, 1, 0),
SOC_SINGLE_TLV("Capture Volume", WM8974_ADCVOL, 0, 255, 0, digital_tlv),
SOC_ENUM("Equaliser Function", wm8974_enum[3]),
SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]),
SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ2 Bandwidth", wm8974_enum[5]),
SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]),
SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ3 Bandwidth", wm8974_enum[7]),
SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]),
SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ4 Bandwidth", wm8974_enum[9]),
SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]),
SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ5 Bandwidth", wm8974_enum[11]),
SOC_ENUM("EQ5 Cut Off", wm8974_enum[12]),
SOC_SINGLE_TLV("EQ5 Volume", WM8974_EQ5, 0, 24, 1, eq_tlv),
SOC_SINGLE("DAC Playback Limiter Switch", WM8974_DACLIM1, 8, 1, 0),
SOC_SINGLE("DAC Playback Limiter Decay", WM8974_DACLIM1, 4, 15, 0),
SOC_SINGLE("DAC Playback Limiter Attack", WM8974_DACLIM1, 0, 15, 0),
SOC_SINGLE("DAC Playback Limiter Threshold", WM8974_DACLIM2, 4, 7, 0),
SOC_SINGLE("DAC Playback Limiter Boost", WM8974_DACLIM2, 0, 15, 0),
SOC_SINGLE("ALC Enable Switch", WM8974_ALC1, 8, 1, 0),
SOC_SINGLE("ALC Capture Max Gain", WM8974_ALC1, 3, 7, 0),
SOC_SINGLE("ALC Capture Min Gain", WM8974_ALC1, 0, 7, 0),
SOC_SINGLE("ALC Capture ZC Switch", WM8974_ALC2, 8, 1, 0),
SOC_SINGLE("ALC Capture Hold", WM8974_ALC2, 4, 7, 0),
SOC_SINGLE("ALC Capture Target", WM8974_ALC2, 0, 15, 0),
SOC_ENUM("ALC Capture Mode", wm8974_enum[13]),
SOC_SINGLE("ALC Capture Decay", WM8974_ALC3, 4, 15, 0),
SOC_SINGLE("ALC Capture Attack", WM8974_ALC3, 0, 15, 0),
SOC_SINGLE("ALC Capture Noise Gate Switch", WM8974_NGATE, 3, 1, 0),
SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8974_NGATE, 0, 7, 0),
SOC_SINGLE("Capture PGA ZC Switch", WM8974_INPPGA, 7, 1, 0),
SOC_SINGLE_TLV("Capture PGA Volume", WM8974_INPPGA, 0, 63, 0, inpga_tlv),
SOC_SINGLE("Speaker Playback ZC Switch", WM8974_SPKVOL, 7, 1, 0),
SOC_SINGLE("Speaker Playback Switch", WM8974_SPKVOL, 6, 1, 1),
SOC_SINGLE_TLV("Speaker Playback Volume", WM8974_SPKVOL, 0, 63, 0, spk_tlv),
SOC_ENUM("Aux Mode", wm8974_auxmode),
SOC_SINGLE("Capture Boost(+20dB)", WM8974_ADCBOOST, 8, 1, 0),
SOC_SINGLE("Mono Playback Switch", WM8974_MONOMIX, 6, 1, 1),
/* DAC / ADC oversampling */
SOC_SINGLE("DAC 128x Oversampling Switch", WM8974_DAC, 8, 1, 0),
SOC_SINGLE("ADC 128x Oversampling Switch", WM8974_ADC, 8, 1, 0),
};
/* Speaker Output Mixer */
static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 0),
};
/* Mono Output Mixer */
static const struct snd_kcontrol_new wm8974_mono_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_MONOMIX, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_MONOMIX, 2, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
};
/* Boost mixer */
static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0),
};
/* Input PGA */
static const struct snd_kcontrol_new wm8974_inpga[] = {
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPUT, 2, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0),
};
/* AUX Input boost vol */
static const struct snd_kcontrol_new wm8974_aux_boost_controls =
SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0);
/* Mic Input boost vol */
static const struct snd_kcontrol_new wm8974_mic_boost_controls =
SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0);
static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0,
&wm8974_speaker_mixer_controls[0],
ARRAY_SIZE(wm8974_speaker_mixer_controls)),
SND_SOC_DAPM_MIXER("Mono Mixer", WM8974_POWER3, 3, 0,
&wm8974_mono_mixer_controls[0],
ARRAY_SIZE(wm8974_mono_mixer_controls)),
SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8974_POWER3, 0, 0),
SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8974_POWER2, 0, 0),
SND_SOC_DAPM_PGA("Aux Input", WM8974_POWER1, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("SpkN Out", WM8974_POWER3, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("SpkP Out", WM8974_POWER3, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("Mono Out", WM8974_POWER3, 7, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Input PGA", WM8974_POWER2, 2, 0, wm8974_inpga,
ARRAY_SIZE(wm8974_inpga)),
SND_SOC_DAPM_MIXER("Boost Mixer", WM8974_POWER2, 4, 0,
wm8974_boost_mixer, ARRAY_SIZE(wm8974_boost_mixer)),
SND_SOC_DAPM_SUPPLY("Mic Bias", WM8974_POWER1, 4, 0, NULL, 0),
SND_SOC_DAPM_INPUT("MICN"),
SND_SOC_DAPM_INPUT("MICP"),
SND_SOC_DAPM_INPUT("AUX"),
SND_SOC_DAPM_OUTPUT("MONOOUT"),
SND_SOC_DAPM_OUTPUT("SPKOUTP"),
SND_SOC_DAPM_OUTPUT("SPKOUTN"),
};
static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
/* Mono output mixer */
{"Mono Mixer", "PCM Playback Switch", "DAC"},
{"Mono Mixer", "Aux Playback Switch", "Aux Input"},
{"Mono Mixer", "Line Bypass Switch", "Boost Mixer"},
/* Speaker output mixer */
{"Speaker Mixer", "PCM Playback Switch", "DAC"},
{"Speaker Mixer", "Aux Playback Switch", "Aux Input"},
{"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"},
/* Outputs */
{"Mono Out", NULL, "Mono Mixer"},
{"MONOOUT", NULL, "Mono Out"},
{"SpkN Out", NULL, "Speaker Mixer"},
{"SpkP Out", NULL, "Speaker Mixer"},
{"SPKOUTN", NULL, "SpkN Out"},
{"SPKOUTP", NULL, "SpkP Out"},
/* Boost Mixer */
{"ADC", NULL, "Boost Mixer"},
{"Boost Mixer", "Aux Switch", "Aux Input"},
{"Boost Mixer", NULL, "Input PGA"},
{"Boost Mixer", NULL, "MICP"},
/* Input PGA */
{"Input PGA", "Aux Switch", "Aux Input"},
{"Input PGA", "MicN Switch", "MICN"},
{"Input PGA", "MicP Switch", "MICP"},
/* Inputs */
{"Aux Input", NULL, "AUX"},
};
struct pll_ {
unsigned int pre_div:1;
unsigned int n:4;
unsigned int k;
};
/* The size in bits of the pll divide multiplied by 10
* to allow rounding later */
#define FIXED_PLL_SIZE ((1 << 24) * 10)
static void pll_factors(struct pll_ *pll_div,
unsigned int target, unsigned int source)
{
unsigned long long Kpart;
unsigned int K, Ndiv, Nmod;
/* There is a fixed divide by 4 in the output path */
target *= 4;
Ndiv = target / source;
if (Ndiv < 6) {
source /= 2;
pll_div->pre_div = 1;
Ndiv = target / source;
} else
pll_div->pre_div = 0;
if ((Ndiv < 6) || (Ndiv > 12))
printk(KERN_WARNING
"WM8974 N value %u outwith recommended range!\n",
Ndiv);
pll_div->n = Ndiv;
Nmod = target % source;
Kpart = FIXED_PLL_SIZE * (long long)Nmod;
do_div(Kpart, source);
K = Kpart & 0xFFFFFFFF;
/* Check if we need to round */
if ((K % 10) >= 5)
K += 5;
/* Move down to proper range now rounding is done */
K /= 10;
pll_div->k = K;
}
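/*
 * In other words, pll_factors() solves
 *	4 * target = (source >> pre_div) * (N + K / 2^24)
 * for the integer divider N (warned about when outside the recommended
 * 6..12 range) and the 24-bit fractional part K, scaling the intermediate
 * K by 10 so it can be rounded before the final divide by 10.
 */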
static int wm8974_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct pll_ pll_div;
u16 reg;
if (freq_in == 0 || freq_out == 0) {
/* Clock CODEC directly from MCLK */
reg = snd_soc_read(codec, WM8974_CLOCK);
snd_soc_write(codec, WM8974_CLOCK, reg & 0x0ff);
/* Turn off PLL */
reg = snd_soc_read(codec, WM8974_POWER1);
snd_soc_write(codec, WM8974_POWER1, reg & 0x1df);
return 0;
}
pll_factors(&pll_div, freq_out, freq_in);
snd_soc_write(codec, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n);
snd_soc_write(codec, WM8974_PLLK1, pll_div.k >> 18);
snd_soc_write(codec, WM8974_PLLK2, (pll_div.k >> 9) & 0x1ff);
snd_soc_write(codec, WM8974_PLLK3, pll_div.k & 0x1ff);
reg = snd_soc_read(codec, WM8974_POWER1);
snd_soc_write(codec, WM8974_POWER1, reg | 0x020);
/* Run CODEC from PLL instead of MCLK */
reg = snd_soc_read(codec, WM8974_CLOCK);
snd_soc_write(codec, WM8974_CLOCK, reg | 0x100);
return 0;
}
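/*
 * The 24-bit fractional divider K computed by pll_factors() is split across
 * three 9-bit registers above: PLLK1 takes bits 23:18, PLLK2 bits 17:9 and
 * PLLK3 bits 8:0, while PLLN carries the pre-divider flag and the integer
 * part N.
 */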
/*
* Configure WM8974 clock dividers.
*/
static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
switch (div_id) {
case WM8974_OPCLKDIV:
reg = snd_soc_read(codec, WM8974_GPIO) & 0x1cf;
snd_soc_write(codec, WM8974_GPIO, reg | div);
break;
case WM8974_MCLKDIV:
reg = snd_soc_read(codec, WM8974_CLOCK) & 0x11f;
snd_soc_write(codec, WM8974_CLOCK, reg | div);
break;
case WM8974_BCLKDIV:
reg = snd_soc_read(codec, WM8974_CLOCK) & 0x1e3;
snd_soc_write(codec, WM8974_CLOCK, reg | div);
break;
default:
return -EINVAL;
}
return 0;
}
static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 iface = 0;
u16 clk = snd_soc_read(codec, WM8974_CLOCK) & 0x1fe;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
clk |= 0x0001;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
iface |= 0x0010;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x0008;
break;
case SND_SOC_DAIFMT_DSP_A:
iface |= 0x00018;
break;
default:
return -EINVAL;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
iface |= 0x0180;
break;
case SND_SOC_DAIFMT_IB_NF:
iface |= 0x0100;
break;
case SND_SOC_DAIFMT_NB_IF:
iface |= 0x0080;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8974_IFACE, iface);
snd_soc_write(codec, WM8974_CLOCK, clk);
return 0;
}
static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f;
u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1;
/* bit size */
switch (params_width(params)) {
case 16:
break;
case 20:
iface |= 0x0020;
break;
case 24:
iface |= 0x0040;
break;
case 32:
iface |= 0x0060;
break;
}
/* filter coefficient */
switch (params_rate(params)) {
case 8000:
adn |= 0x5 << 1;
break;
case 11025:
adn |= 0x4 << 1;
break;
case 16000:
adn |= 0x3 << 1;
break;
case 22050:
adn |= 0x2 << 1;
break;
case 32000:
adn |= 0x1 << 1;
break;
case 44100:
case 48000:
break;
}
snd_soc_write(codec, WM8974_IFACE, iface);
snd_soc_write(codec, WM8974_ADD, adn);
return 0;
}
static int wm8974_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
if (mute)
snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
else
snd_soc_write(codec, WM8974_DAC, mute_reg);
return 0;
}
/* liam need to make this lower power with dapm */
static int wm8974_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 power1 = snd_soc_read(codec, WM8974_POWER1) & ~0x3;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
power1 |= 0x1; /* VMID 50k */
snd_soc_write(codec, WM8974_POWER1, power1);
break;
case SND_SOC_BIAS_STANDBY:
power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
regcache_sync(dev_get_regmap(codec->dev, NULL));
/* Initial cap charge at VMID 5k */
snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
mdelay(100);
}
power1 |= 0x2; /* VMID 500k */
snd_soc_write(codec, WM8974_POWER1, power1);
break;
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, WM8974_POWER1, 0);
snd_soc_write(codec, WM8974_POWER2, 0);
snd_soc_write(codec, WM8974_POWER3, 0);
break;
}
return 0;
}
#define WM8974_RATES (SNDRV_PCM_RATE_8000_48000)
#define WM8974_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops wm8974_ops = {
.hw_params = wm8974_pcm_hw_params,
.digital_mute = wm8974_mute,
.set_fmt = wm8974_set_dai_fmt,
.set_clkdiv = wm8974_set_dai_clkdiv,
.set_pll = wm8974_set_dai_pll,
};
static struct snd_soc_dai_driver wm8974_dai = {
.name = "wm8974-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2, /* Only 1 channel of data */
.rates = WM8974_RATES,
.formats = WM8974_FORMATS,},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2, /* Only 1 channel of data */
.rates = WM8974_RATES,
.formats = WM8974_FORMATS,},
.ops = &wm8974_ops,
.symmetric_rates = 1,
};
static const struct regmap_config wm8974_regmap = {
.reg_bits = 7,
.val_bits = 9,
.max_register = WM8974_MONOMIX,
.reg_defaults = wm8974_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
};
static int wm8974_probe(struct snd_soc_codec *codec)
{
int ret = 0;
ret = wm8974_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
return ret;
}
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
.probe = wm8974_probe,
.set_bias_level = wm8974_set_bias_level,
.suspend_bias_off = true,
.controls = wm8974_snd_controls,
.num_controls = ARRAY_SIZE(wm8974_snd_controls),
.dapm_widgets = wm8974_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8974_dapm_widgets),
.dapm_routes = wm8974_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
};
static int wm8974_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct regmap *regmap;
int ret;
regmap = devm_regmap_init_i2c(i2c, &wm8974_regmap);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8974, &wm8974_dai, 1);
return ret;
}
static int wm8974_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8974_i2c_id[] = {
{ "wm8974", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8974_i2c_id);
static struct i2c_driver wm8974_i2c_driver = {
.driver = {
.name = "wm8974",
},
.probe = wm8974_i2c_probe,
.remove = wm8974_i2c_remove,
.id_table = wm8974_i2c_id,
};
module_i2c_driver(wm8974_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8974 driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
| gpl-2.0 |
manveru0/FeaCore_Phoenix | drivers/net/e1000e/es2lan.c | 765 | 40883 | /*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/*
* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
*/
#include "e1000.h"
#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
/* PHY Specific Control Register 2 (Page 0, Register 26) */
#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
/* 1=Reverse Auto-Negotiation */
/* MAC Specific Control Register (Page 2, Register 21) */
/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
#define GG82563_MSCR_TX_CLK_MASK 0x0007
#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
/* DSP Distance Register (Page 5, Register 26) */
#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
1 = 50-80M
2 = 80-110M
3 = 110-140M
4 = >140M */
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
/* Max number of times Kumeran read/write should be validated */
#define GG82563_MAX_KMRN_RETRY 0x5
/* Power Management Control Register (Page 193, Register 20) */
#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
/* 1=Enable SERDES Electrical Idle */
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
/*
* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
static const u16 e1000_gg82563_cable_length_table[] =
{ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_gg82563_cable_length_table)
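/*
 * Worked example: a DSP distance register value of 1 gives
 * e1000_get_cable_length_80003es2lan() below a minimum cable length of 60
 * (table[1]) and a maximum of 115 (table[1 + 5]), so the reported
 * cable_length is the midpoint, (60 + 115) / 2 = 87.
 */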
static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
/**
* e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
return 0;
} else {
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
}
phy->addr = 1;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
phy->type = e1000_phy_gg82563;
/* This can only be done after all function pointers are setup. */
ret_val = e1000e_get_phy_id(hw);
/* Verify phy id */
if (phy->id != GG82563_E_PHY_ID)
return -E1000_ERR_PHY;
return ret_val;
}
/**
* e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u16 size;
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
case e1000_nvm_override_spi_large:
nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
break;
}
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
return 0;
}
/**
* e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_mac_operations *func = &mac->ops;
/* Set media type */
switch (adapter->pdev->device) {
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->phy.media_type = e1000_media_type_internal_serdes;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
break;
}
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* FWSM register */
mac->has_fwsm = true;
/* ARC supported; valid only if manageability features are enabled. */
mac->arc_subsystem_valid =
(er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
case e1000_media_type_copper:
func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
func->check_for_link = e1000e_check_for_copper_link;
break;
case e1000_media_type_fiber:
func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
func->check_for_link = e1000e_check_for_fiber_link;
break;
case e1000_media_type_internal_serdes:
func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
func->check_for_link = e1000e_check_for_serdes_link;
break;
default:
return -E1000_ERR_CONFIG;
}
/* set lan id for port to determine which phy lock to use */
hw->mac.ops.set_lan_id(hw);
return 0;
}
static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
s32 rc;
rc = e1000_init_mac_params_80003es2lan(adapter);
if (rc)
return rc;
rc = e1000_init_nvm_params_80003es2lan(hw);
if (rc)
return rc;
rc = e1000_init_phy_params_80003es2lan(hw);
if (rc)
return rc;
return 0;
}
/**
* e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
* @hw: pointer to the HW structure
*
* A wrapper to acquire access rights to the correct PHY.
**/
static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
{
u16 mask;
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
* e1000_release_phy_80003es2lan - Release rights to access PHY
* @hw: pointer to the HW structure
*
* A wrapper to release access rights to the correct PHY.
**/
static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
{
u16 mask;
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
* e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
* @hw: pointer to the HW structure
*
* Acquire the semaphore to access the Kumeran interface.
*
**/
static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
{
u16 mask;
mask = E1000_SWFW_CSR_SM;
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
* e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
* @hw: pointer to the HW structure
*
* Release the semaphore used to access the Kumeran interface
**/
static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
{
u16 mask;
mask = E1000_SWFW_CSR_SM;
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
* e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
* @hw: pointer to the HW structure
*
* Acquire the semaphore to access the EEPROM.
**/
static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val;
ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
if (ret_val)
return ret_val;
ret_val = e1000e_acquire_nvm(hw);
if (ret_val)
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
return ret_val;
}
/**
* e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
* @hw: pointer to the HW structure
*
* Release the semaphore used to access the EEPROM.
**/
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
{
e1000e_release_nvm(hw);
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 i = 0;
s32 timeout = 50;
while (i < timeout) {
if (e1000e_get_hw_semaphore(hw))
return -E1000_ERR_SWFW_SYNC;
swfw_sync = er32(SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000e_put_hw_semaphore(hw);
mdelay(5);
i++;
}
if (i == timeout) {
e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
return -E1000_ERR_SWFW_SYNC;
}
swfw_sync |= swmask;
ew32(SW_FW_SYNC, swfw_sync);
e1000e_put_hw_semaphore(hw);
return 0;
}
/**
* e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
while (e1000e_get_hw_semaphore(hw) != 0)
; /* Empty */
swfw_sync = er32(SW_FW_SYNC);
swfw_sync &= ~mask;
ew32(SW_FW_SYNC, swfw_sync);
e1000e_put_hw_semaphore(hw);
}
/**
* e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
* @hw: pointer to the HW structure
* @offset: offset of the register to read
* @data: pointer to the data returned from the operation
*
* Read the GG82563 PHY register.
**/
static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
u32 offset, u16 *data)
{
s32 ret_val;
u32 page_select;
u16 temp;
ret_val = e1000_acquire_phy_80003es2lan(hw);
if (ret_val)
return ret_val;
/* Select Configuration Page */
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
/*
* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
}
temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
if (ret_val) {
e1000_release_phy_80003es2lan(hw);
return ret_val;
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
udelay(200);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
ret_val = -E1000_ERR_PHY;
e1000_release_phy_80003es2lan(hw);
return ret_val;
}
udelay(200);
ret_val = e1000e_read_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
udelay(200);
} else {
ret_val = e1000e_read_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
}
e1000_release_phy_80003es2lan(hw);
return ret_val;
}
/**
* e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
* @hw: pointer to the HW structure
* @offset: offset of the register to read
* @data: value to write to the register
*
* Write to the GG82563 PHY register.
**/
static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
u32 offset, u16 data)
{
s32 ret_val;
u32 page_select;
u16 temp;
ret_val = e1000_acquire_phy_80003es2lan(hw);
if (ret_val)
return ret_val;
/* Select Configuration Page */
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
/*
* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
}
temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
if (ret_val) {
e1000_release_phy_80003es2lan(hw);
return ret_val;
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
udelay(200);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
e1000_release_phy_80003es2lan(hw);
return -E1000_ERR_PHY;
}
udelay(200);
ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
udelay(200);
} else {
ret_val = e1000e_write_phy_reg_mdic(hw,
MAX_PHY_REG_ADDRESS & offset,
data);
}
e1000_release_phy_80003es2lan(hw);
return ret_val;
}
/**
* e1000_write_nvm_80003es2lan - Write to ESB2 NVM
* @hw: pointer to the HW structure
* @offset: offset of the register to read
* @words: number of words to write
* @data: buffer of data to write to the NVM
*
* Write "words" of data to the ESB2 NVM.
**/
static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data)
{
return e1000e_write_nvm_spi(hw, offset, words, data);
}
/**
* e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
* @hw: pointer to the HW structure
*
* Wait a specific amount of time for manageability processes to complete.
* This is a function pointer entry point called by the phy module.
**/
static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1)
mask = E1000_NVM_CFG_DONE_PORT_1;
while (timeout) {
if (er32(EEMNGCTL) & mask)
break;
msleep(1);
timeout--;
}
if (!timeout) {
e_dbg("MNG configuration cycle has not completed.\n");
return -E1000_ERR_RESET;
}
return 0;
}
/**
* e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
* @hw: pointer to the HW structure
*
* Force the speed and duplex settings onto the PHY. This is a
* function pointer entry point called by the phy module.
**/
static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
bool link;
/*
* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
if (ret_val)
return ret_val;
e_dbg("GG82563 PSCR: %X\n", phy_data);
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
/* Reset the phy to commit changes. */
phy_data |= MII_CR_RESET;
ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
if (ret_val)
return ret_val;
udelay(1);
if (hw->phy.autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link "
"on GG82563 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
100000, &link);
if (ret_val)
return ret_val;
if (!link) {
/*
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1000e_phy_reset_dsp(hw);
if (ret_val)
return ret_val;
}
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
100000, &link);
if (ret_val)
return ret_val;
}
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
/*
* Resetting the phy means we need to verify the TX_CLK corresponds
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
*/
phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
else
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
/*
* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
return ret_val;
}
/**
* e1000_get_cable_length_80003es2lan - Set approximate cable length
* @hw: pointer to the HW structure
*
* Find the approximate cable length as measured by the GG82563 PHY.
* This is a function pointer entry point called by the phy module.
**/
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 phy_data, index;
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
if (ret_val)
goto out;
index = phy_data & GG82563_DSPD_CABLE_LENGTH;
if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
ret_val = -E1000_ERR_PHY;
goto out;
}
phy->min_cable_length = e1000_gg82563_cable_length_table[index];
phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
out:
return ret_val;
}
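/*
 * Illustrative example (table values not shown here): the DSP distance
 * register yields an index into e1000_gg82563_cable_length_table, and the
 * reported range spans five entries.  If the PHY reports index 2, then
 *
 *	min_cable_length = e1000_gg82563_cable_length_table[2];
 *	max_cable_length = e1000_gg82563_cable_length_table[2 + 5];
 *	cable_length     = (min_cable_length + max_cable_length) / 2;
 *
 * i.e. the midpoint of that window is reported as the estimate.
 */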
/**
* e1000_get_link_up_info_80003es2lan - Report speed and duplex
* @hw: pointer to the HW structure
* @speed: pointer to speed buffer
* @duplex: pointer to duplex buffer
*
* Retrieve the current speed and duplex configuration.
**/
static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
s32 ret_val;
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw,
speed,
duplex);
hw->phy.ops.cfg_on_link_up(hw);
} else {
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
speed,
duplex);
}
return ret_val;
}
/**
* e1000_reset_hw_80003es2lan - Reset the ESB2 controller
* @hw: pointer to the HW structure
*
* Perform a global reset to the ESB2 controller.
**/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
u32 ctrl, icr;
s32 ret_val;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
msleep(10);
ctrl = er32(CTRL);
ret_val = e1000_acquire_phy_80003es2lan(hw);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
e1000_release_phy_80003es2lan(hw);
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
/* We don't want to continue accessing MAC registers. */
return ret_val;
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
icr = er32(ICR);
ret_val = e1000_check_alt_mac_addr_generic(hw);
return ret_val;
}
/**
* e1000_init_hw_80003es2lan - Initialize the ESB2 controller
* @hw: pointer to the HW structure
*
* Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
**/
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 reg_data;
s32 ret_val;
u16 i;
e1000_initialize_hw_bits_80003es2lan(hw);
/* Initialize identification LED */
ret_val = e1000e_id_led_init(hw);
if (ret_val)
e_dbg("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
mac->ops.clear_vfta(hw);
/* Setup the receive address. */
e1000e_init_rx_addrs(hw, mac->rar_entry_count);
/* Zero out the Multicast HASH table */
e_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Setup link and flow control */
ret_val = e1000e_setup_link(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
reg_data = er32(TCTL);
reg_data |= E1000_TCTL_RTLC;
ew32(TCTL, reg_data);
/* Configure Gigabit Carry Extend Padding */
reg_data = er32(TCTL_EXT);
reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
ew32(TCTL_EXT, reg_data);
/* Configure Transmit Inter-Packet Gap */
reg_data = er32(TIPG);
reg_data &= ~E1000_TIPG_IPGT_MASK;
reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
ew32(TIPG, reg_data);
reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
reg_data &= ~0x00100000;
E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
/* default to true to enable the MDIC W/A */
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET >>
E1000_KMRNCTRLSTA_OFFSET_SHIFT,
&i);
if (!ret_val) {
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
e1000_clear_hw_cntrs_80003es2lan(hw);
return ret_val;
}
/**
* e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
* @hw: pointer to the HW structure
*
* Initializes required hardware-dependent bits needed for normal operation.
**/
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
{
u32 reg;
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->phy.media_type != e1000_media_type_copper)
reg &= ~(1 << 20);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
ew32(TARC(1), reg);
}
/**
* e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
* @hw: pointer to the HW structure
*
* Setup some GG82563 PHY registers for obtaining link
**/
static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u32 ctrl_ext;
u16 data;
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
if (ret_val)
return ret_val;
data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
if (ret_val)
return ret_val;
/*
* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
* 2 - MDI-X mode
* 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
*/
ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
if (ret_val)
return ret_val;
data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
switch (phy->mdix) {
case 1:
data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
break;
case 2:
data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
break;
case 0:
default:
data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
break;
}
/*
* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
* 1 - Enabled
*/
data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
if (phy->disable_polarity_correction)
data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
if (ret_val)
return ret_val;
/* SW Reset the PHY so all changes take effect */
ret_val = e1000e_commit_phy(hw);
if (ret_val) {
e_dbg("Error Resetting the PHY\n");
return ret_val;
}
/* Bypass Rx and Tx FIFO's */
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
&data);
if (ret_val)
return ret_val;
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
data);
if (ret_val)
return ret_val;
ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
if (ret_val)
return ret_val;
data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
if (ret_val)
return ret_val;
ctrl_ext = er32(CTRL_EXT);
ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
ew32(CTRL_EXT, ctrl_ext);
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
if (ret_val)
return ret_val;
/*
* Do not init these registers when the HW is in IAMT mode, since the
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
if (!e1000e_check_mng_mode(hw)) {
/* Enable Electrical Idle on the PHY */
data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
if (ret_val)
return ret_val;
ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
if (ret_val)
return ret_val;
data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
if (ret_val)
return ret_val;
}
/*
* Workaround: Disable padding in Kumeran interface in the MAC
* and in the PHY to avoid CRC errors.
*/
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
if (ret_val)
return ret_val;
data |= GG82563_ICR_DIS_PADDING;
ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
if (ret_val)
return ret_val;
return 0;
}
/**
* e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
* @hw: pointer to the HW structure
*
* Essentially a wrapper for setting up all things "copper" related.
* This is a function pointer entry point called by the mac module.
**/
static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
u16 reg_data;
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
/*
* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
0xFFFF);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
						&reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
reg_data);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
						&reg_data);
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
reg_data);
if (ret_val)
return ret_val;
ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
if (ret_val)
return ret_val;
	return e1000e_setup_copper_link(hw);
}
/**
* e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
* @hw: pointer to the HW structure
*
* Configure the KMRN interface after link-up by applying the last-minute
* quirks for either 10/100 or gigabit operation, based on the negotiated
* speed.
**/
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 speed;
u16 duplex;
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
&duplex);
if (ret_val)
return ret_val;
if (speed == SPEED_1000)
ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
else
ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
}
return ret_val;
}
/**
* e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
* @hw: pointer to the HW structure
* @duplex: current duplex setting
*
* Configure the KMRN interface by applying last minute quirks for
* 10/100 operation.
**/
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
{
s32 ret_val;
u32 tipg;
u32 i = 0;
u16 reg_data, reg_data2;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
/* Configure Transmit Inter-Packet Gap */
tipg = er32(TIPG);
tipg &= ~E1000_TIPG_IPGT_MASK;
tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
ew32(TIPG, tipg);
do {
		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
if (ret_val)
return ret_val;
		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
if (ret_val)
return ret_val;
i++;
} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
if (duplex == HALF_DUPLEX)
reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
else
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
	return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
}
/**
* e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
* @hw: pointer to the HW structure
*
* Configure the KMRN interface by applying last minute quirks for
* gigabit operation.
**/
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 reg_data, reg_data2;
u32 tipg;
u32 i = 0;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
/* Configure Transmit Inter-Packet Gap */
tipg = er32(TIPG);
tipg &= ~E1000_TIPG_IPGT_MASK;
tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
ew32(TIPG, tipg);
do {
		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
if (ret_val)
return ret_val;
		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
if (ret_val)
return ret_val;
i++;
} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
return ret_val;
}
/**
* e1000_read_kmrn_reg_80003es2lan - Read kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
*
* Acquire semaphore, then read the PHY register at offset
* using the kumeran interface. The information retrieved is stored in data.
* Release the semaphore before exiting.
**/
static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data)
{
u32 kmrnctrlsta;
s32 ret_val = 0;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
udelay(2);
kmrnctrlsta = er32(KMRNCTRLSTA);
*data = (u16)kmrnctrlsta;
e1000_release_mac_csr_80003es2lan(hw);
return ret_val;
}
/**
* e1000_write_kmrn_reg_80003es2lan - Write kumeran register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
*
* Acquire semaphore, then write the data to PHY register
* at the offset using the kumeran interface. Release semaphore
* before exiting.
**/
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data)
{
u32 kmrnctrlsta;
s32 ret_val = 0;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
udelay(2);
e1000_release_mac_csr_80003es2lan(hw);
return ret_val;
}
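/*
 * Usage sketch (illustrative only): a Kumeran access packs the register
 * offset into the upper field of KMRNCTRLSTA; a read additionally sets the
 * REN bit and returns the value in the low 16 bits.  For example, the
 * inband-control register can be read-modified-written like this:
 *
 *	u16 inb_ctrl;
 *
 *	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
 *			E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, &inb_ctrl);
 *	if (!ret_val) {
 *		inb_ctrl |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
 *		ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
 *				E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, inb_ctrl);
 *	}
 *
 * which mirrors what e1000_setup_copper_link_80003es2lan() does above.
 */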
/**
* e1000_read_mac_addr_80003es2lan - Read device MAC address
* @hw: pointer to the HW structure
**/
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}
/**
* e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
* driver unload, or wake on lan is not enabled, remove the link.
**/
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
{
/* If the management interface is not enabled, then power down */
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
}
/**
* e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
* Clears the hardware counters by reading the counter registers.
**/
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
{
e1000e_clear_hw_cntrs_base(hw);
er32(PRC64);
er32(PRC127);
er32(PRC255);
er32(PRC511);
er32(PRC1023);
er32(PRC1522);
er32(PTC64);
er32(PTC127);
er32(PTC255);
er32(PTC511);
er32(PTC1023);
er32(PTC1522);
er32(ALGNERRC);
er32(RXERRC);
er32(TNCRS);
er32(CEXTERR);
er32(TSCTC);
er32(TSCTFC);
er32(MGTPRC);
er32(MGTPDC);
er32(MGTPTC);
er32(IAC);
er32(ICRXOC);
er32(ICRXPTC);
er32(ICRXATC);
er32(ICTXPTC);
er32(ICTXATC);
er32(ICTXQEC);
er32(ICTXQMTC);
er32(ICRXDMTC);
}
static struct e1000_mac_operations es2_mac_ops = {
.read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
.get_bus_info = e1000e_get_bus_info_pcie,
.set_lan_id = e1000_set_lan_id_multi_port_pcie,
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_generic,
.reset_hw = e1000_reset_hw_80003es2lan,
.init_hw = e1000_init_hw_80003es2lan,
.setup_link = e1000e_setup_link,
/* setup_physical_interface dependent on media type */
.setup_led = e1000e_setup_led_generic,
};
static struct e1000_phy_operations es2_phy_ops = {
.acquire = e1000_acquire_phy_80003es2lan,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
.get_cfg_done = e1000_get_cfg_done_80003es2lan,
.get_cable_length = e1000_get_cable_length_80003es2lan,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
.release = e1000_release_phy_80003es2lan,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = NULL,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
};
static struct e1000_nvm_operations es2_nvm_ops = {
.acquire = e1000_acquire_nvm_80003es2lan,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_80003es2lan,
.update = e1000e_update_nvm_checksum_generic,
.valid_led_default = e1000e_valid_led_default,
.validate = e1000e_validate_nvm_checksum_generic,
.write = e1000_write_nvm_80003es2lan,
};
struct e1000_info e1000_es2_info = {
.mac = e1000_80003es2lan,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_RX_NEEDS_RESTART /* errata */
| FLAG_TARC_SET_BIT_ZERO /* errata */
| FLAG_APME_CHECK_PORT_B
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_80003es2lan,
.mac_ops = &es2_mac_ops,
.phy_ops = &es2_phy_ops,
.nvm_ops = &es2_nvm_ops,
};
| gpl-2.0 |
androidbftab1/bf-kernel-3.18 | arch/mips/loongson/lemote-2f/clock.c | 765 | 3106 | /*
* Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <asm/clock.h>
#include <asm/mach-loongson/loongson.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
/* Minimum CLK support */
enum {
DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT,
DC_87PT, DC_DISABLE, DC_RESV
};
struct cpufreq_frequency_table loongson2_clockmod_table[] = {
{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
{0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
{0, DC_25PT, 0},
{0, DC_37PT, 0},
{0, DC_50PT, 0},
{0, DC_62PT, 0},
{0, DC_75PT, 0},
{0, DC_87PT, 0},
{0, DC_DISABLE, 0},
{0, DC_RESV, CPUFREQ_TABLE_END},
};
EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
static struct clk cpu_clk = {
.name = "cpu_clk",
.flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
.rate = 800000000,
};
struct clk *clk_get(struct device *dev, const char *id)
{
return &cpu_clk;
}
EXPORT_SYMBOL(clk_get);
static void propagate_rate(struct clk *clk)
{
struct clk *clkp;
list_for_each_entry(clkp, &clock_list, node) {
if (likely(clkp->parent != clk))
continue;
if (likely(clkp->ops && clkp->ops->recalc))
clkp->ops->recalc(clkp);
if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
propagate_rate(clkp);
}
}
int clk_enable(struct clk *clk)
{
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
return (unsigned long)clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
void clk_put(struct clk *clk)
{
}
EXPORT_SYMBOL(clk_put);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned int rate_khz = rate / 1000;
struct cpufreq_frequency_table *pos;
int ret = 0;
int regval;
if (likely(clk->ops && clk->ops->set_rate)) {
unsigned long flags;
spin_lock_irqsave(&clock_lock, flags);
ret = clk->ops->set_rate(clk, rate, 0);
spin_unlock_irqrestore(&clock_lock, flags);
}
if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
propagate_rate(clk);
cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
if (rate_khz == pos->frequency)
break;
if (rate_khz != pos->frequency)
return -ENOTSUPP;
clk->rate = rate;
regval = LOONGSON_CHIPCFG(0);
regval = (regval & ~0x7) | (pos->driver_data - 1);
LOONGSON_CHIPCFG(0) = regval;
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
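/*
 * Worked example (assuming the 800 MHz cpu_clk above and that the cpufreq
 * driver has filled loongson2_clockmod_table so that the 50% entry is
 * 400000 kHz): clk_set_rate(clk, 400000000) matches the DC_50PT entry
 * (driver_data == 4), so the low three bits of LOONGSON_CHIPCFG(0) are set
 * to 4 - 1 = 3 while the remaining bits are preserved.
 */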
long clk_round_rate(struct clk *clk, unsigned long rate)
{
if (likely(clk->ops && clk->ops->round_rate)) {
unsigned long flags, rounded;
spin_lock_irqsave(&clock_lock, flags);
rounded = clk->ops->round_rate(clk, rate);
spin_unlock_irqrestore(&clock_lock, flags);
return rounded;
}
return rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
| gpl-2.0 |
Leoyzen/Charm-Kiss-Primou | drivers/firewire/net.c | 1533 | 44387 | /*
* IPv4 over IEEE 1394, per RFC 2734
*
* Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
*
* based on eth1394 by Ben Collins et al
*/
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/highmem.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <net/arp.h>
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
/* tx limits */
#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */
#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */
#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */
#define IEEE1394_BROADCAST_CHANNEL 31
#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100 512
#define FWNET_NO_FIFO_ADDR (~0ULL)
#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U
#define IEEE1394_GASP_HDR_SIZE 8
#define RFC2374_UNFRAG_HDR_SIZE 4
#define RFC2374_FRAG_HDR_SIZE 8
#define RFC2374_FRAG_OVERHEAD 4
#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
#define RFC2734_HW_ADDR_LEN 16
struct rfc2734_arp {
__be16 hw_type; /* 0x0018 */
__be16 proto_type; /* 0x0806 */
u8 hw_addr_len; /* 16 */
u8 ip_addr_len; /* 4 */
__be16 opcode; /* ARP Opcode */
/* Above is exactly the same format as struct arphdr */
__be64 s_uniq_id; /* Sender's 64bit EUI */
u8 max_rec; /* Sender's max packet size */
u8 sspd; /* Sender's max speed */
__be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
__be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
__be32 sip; /* Sender's IP Address */
__be32 tip; /* IP Address of requested hw addr */
} __attribute__((packed));
/* This header format is specific to this driver implementation. */
#define FWNET_ALEN 8
#define FWNET_HLEN 10
struct fwnet_header {
u8 h_dest[FWNET_ALEN]; /* destination address */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
u32 w0;
u32 w1;
};
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
unsigned ether_type)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
| fwnet_set_hdr_ether_type(ether_type);
}
static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
unsigned ether_type, unsigned dg_size, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_ether_type(ether_type);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(lf)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_fg_off(fg_off);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
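/*
 * For reference, the bit layout produced by the helpers above (matching
 * the fwnet_get_hdr_* accessors):
 *
 *	w0: lf in bits 31:30, dg_size in bits 27:16, and either the ether
 *	    type (unfragmented/first fragment) or fg_off (interior/last
 *	    fragment) in the low bits
 *	w1: dgl in bits 31:16 (fragmented datagrams only)
 *
 * e.g. fwnet_make_uf_hdr(&hdr, ETH_P_IP) encodes only the fragment code
 * and ether type, while the fragment variants also carry the datagram
 * size, fragment offset and datagram label.
 */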
/* This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
struct list_head fi_link;
u16 offset;
u16 len;
};
struct fwnet_partial_datagram {
struct list_head pd_link;
struct list_head fi_list;
struct sk_buff *skb;
/* FIXME Why not use skb->data? */
char *pbuf;
u16 datagram_label;
u16 ether_type;
u16 datagram_size;
};
static DEFINE_MUTEX(fwnet_device_mutex);
static LIST_HEAD(fwnet_device_list);
struct fwnet_device {
struct list_head dev_link;
spinlock_t lock;
enum {
FWNET_BROADCAST_ERROR,
FWNET_BROADCAST_RUNNING,
FWNET_BROADCAST_STOPPED,
} broadcast_state;
struct fw_iso_context *broadcast_rcv_context;
struct fw_iso_buffer broadcast_rcv_buffer;
void **broadcast_rcv_buffer_ptrs;
unsigned broadcast_rcv_next_ptr;
unsigned num_broadcast_rcv_ptrs;
unsigned rcv_buffer_size;
/*
* This value is the maximum unfragmented datagram size that can be
* sent by the hardware. It already has the GASP overhead and the
* unfragmented datagram header overhead calculated into it.
*/
unsigned broadcast_xmt_max_payload;
u16 broadcast_xmt_datagramlabel;
/*
* The CSR address that remote nodes must send datagrams to for us to
* receive them.
*/
struct fw_address_handler handler;
u64 local_fifo;
/* Number of tx datagrams that have been queued but not yet acked */
int queued_datagrams;
int peer_count;
struct list_head peer_list;
struct fw_card *card;
struct net_device *netdev;
};
struct fwnet_peer {
struct list_head peer_link;
struct fwnet_device *dev;
u64 guid;
u64 fifo;
__be32 ip;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
unsigned pdg_size; /* pd_list size */
u16 datagram_label; /* outgoing datagram label */
u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
int node_id;
int generation;
unsigned speed;
};
/* This is our task struct. It's used for the packet complete callback. */
struct fwnet_packet_task {
struct fw_transaction transaction;
struct rfc2734_header hdr;
struct sk_buff *skb;
struct fwnet_device *dev;
int outstanding_pkts;
u64 fifo_addr;
u16 dest_node;
u16 max_payload;
u8 generation;
u8 speed;
u8 enqueued;
};
/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
unsigned short type, const void *daddr,
const void *saddr, unsigned len)
{
struct fwnet_header *h;
h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
put_unaligned_be16(type, &h->h_proto);
if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
memset(h->h_dest, 0, net->addr_len);
return net->hard_header_len;
}
if (daddr) {
memcpy(h->h_dest, daddr, net->addr_len);
return net->hard_header_len;
}
return -net->hard_header_len;
}
static int fwnet_header_rebuild(struct sk_buff *skb)
{
struct fwnet_header *h = (struct fwnet_header *)skb->data;
if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
return arp_find((unsigned char *)&h->h_dest, skb);
fw_notify("%s: unable to resolve type %04x addresses\n",
skb->dev->name, be16_to_cpu(h->h_proto));
return 0;
}
static int fwnet_header_cache(const struct neighbour *neigh,
struct hh_cache *hh)
{
struct net_device *net;
struct fwnet_header *h;
if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
h->h_proto = hh->hh_type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
hh->hh_len = FWNET_HLEN;
return 0;
}
/* Called by Address Resolution module to notify changes in address. */
static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)
{
memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
}
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
return FWNET_ALEN;
}
static const struct header_ops fwnet_header_ops = {
.create = fwnet_header_create,
.rebuild = fwnet_header_rebuild,
.cache = fwnet_header_cache,
.cache_update = fwnet_header_cache_update,
.parse = fwnet_header_parse,
};
/* FIXME: is this correct for all cases? */
static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi;
unsigned end = offset + len;
list_for_each_entry(fi, &pd->fi_list, fi_link)
if (offset < fi->offset + fi->len && end > fi->offset)
return true;
return false;
}
/* Assumes that new fragment does not overlap any existing fragments */
static struct fwnet_fragment_info *fwnet_frag_new(
struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi, *fi2, *new;
struct list_head *list;
list = &pd->fi_list;
list_for_each_entry(fi, &pd->fi_list, fi_link) {
if (fi->offset + fi->len == offset) {
/* The new fragment can be tacked on to the end */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.next,
struct fwnet_fragment_info, fi_link);
if (fi->offset + fi->len == fi2->offset) {
/* glue fragments together */
fi->len += len + fi2->len;
list_del(&fi2->fi_link);
kfree(fi2);
} else {
fi->len += len;
}
return fi;
}
if (offset + len == fi->offset) {
/* The new fragment can be tacked on to the beginning */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.prev,
struct fwnet_fragment_info, fi_link);
if (fi2->offset + fi2->len == fi->offset) {
/* glue fragments together */
fi2->len += fi->len + len;
list_del(&fi->fi_link);
kfree(fi);
return fi2;
}
fi->offset = offset;
fi->len += len;
return fi;
}
if (offset > fi->offset + fi->len) {
list = &fi->fi_link;
break;
}
if (offset + len < fi->offset) {
list = fi->fi_link.prev;
break;
}
}
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
fw_error("out of memory\n");
return NULL;
}
new->offset = offset;
new->len = len;
list_add(&new->fi_link, list);
return new;
}
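/*
 * Example (illustrative): with an existing entry covering offsets 0..99
 * (offset 0, len 100), a new fragment at offset 100 with len 50 hits the
 * "tacked on to the end" case and simply grows that entry to len 150; a
 * fragment ending exactly at an entry's offset grows it from the front
 * instead.  Fragments touching neither end get an entry of their own,
 * inserted so the list stays ordered by offset.
 */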
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
void *frag_buf, unsigned frag_off, unsigned frag_len)
{
struct fwnet_partial_datagram *new;
struct fwnet_fragment_info *fi;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
goto fail;
INIT_LIST_HEAD(&new->fi_list);
fi = fwnet_frag_new(new, frag_off, frag_len);
if (fi == NULL)
goto fail_w_new;
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
if (new->skb == NULL)
goto fail_w_fi;
skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add_tail(&new->pd_link, &peer->pd_list);
return new;
fail_w_fi:
kfree(fi);
fail_w_new:
kfree(new);
fail:
fw_error("out of memory\n");
return NULL;
}
static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
u16 datagram_label)
{
struct fwnet_partial_datagram *pd;
list_for_each_entry(pd, &peer->pd_list, pd_link)
if (pd->datagram_label == datagram_label)
return pd;
return NULL;
}
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
struct fwnet_fragment_info *fi, *n;
list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
kfree(fi);
list_del(&old->pd_link);
dev_kfree_skb_any(old->skb);
kfree(old);
}
static bool fwnet_pd_update(struct fwnet_peer *peer,
struct fwnet_partial_datagram *pd, void *frag_buf,
unsigned frag_off, unsigned frag_len)
{
if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
return false;
memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
/*
	 * Move list entry to the end of the list so that the oldest partial
	 * datagrams percolate to the front of the list
*/
list_move_tail(&pd->pd_link, &peer->pd_list);
return true;
}
static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
{
struct fwnet_fragment_info *fi;
fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
return fi->len == pd->datagram_size;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
u64 guid)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->guid == guid)
return peer;
return NULL;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
int node_id, int generation)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->node_id == node_id &&
peer->generation == generation)
return peer;
return NULL;
}
/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
max_rec = min(max_rec, 0xbU); /* <= 4096 */
if (max_rec < 8) {
fw_notify("max_rec %x out of range\n", max_rec);
max_rec = 8;
}
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
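/*
 * Worked example: a peer advertising max_rec = 8 at S400 stays at
 * min(8, 2 + 8) = 8, so the largest block write is 1 << 9 = 512 bytes and
 * the usable payload is 512 - RFC2374_FRAG_HDR_SIZE = 504 bytes.  At the
 * 0xb ceiling the packet is 4096 bytes and the payload 4088 bytes.
 */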
static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
struct fwnet_device *dev;
static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
int status;
__be64 guid;
dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
/*
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet frame header, as well as arp
* conversion if needed. ARP conversion is easier in this
* direction, since we are using ethernet as our backend.
*/
/*
* If this is an ARP packet, convert it. First, we want to make
* use of some of the fields, since they tell us a little bit
* about the sending machine.
*/
if (ether_type == ETH_P_ARP) {
struct rfc2734_arp *arp1394;
struct arphdr *arp;
unsigned char *arp_ptr;
u64 fifo_addr;
u64 peer_guid;
unsigned sspd;
u16 max_payload;
struct fwnet_peer *peer;
unsigned long flags;
arp1394 = (struct rfc2734_arp *)skb->data;
arp = (struct arphdr *)skb->data;
arp_ptr = (unsigned char *)(arp + 1);
peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
| get_unaligned_be32(&arp1394->fifo_lo);
sspd = arp1394->sspd;
/* Sanity check. OS X 10.3 PPC reportedly sends 131. */
if (sspd > SCODE_3200) {
fw_notify("sspd %x out of range\n", sspd);
sspd = SCODE_3200;
}
max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_guid(dev, peer_guid);
if (peer) {
peer->fifo = fifo_addr;
if (peer->speed > sspd)
peer->speed = sspd;
if (peer->max_payload > max_payload)
peer->max_payload = max_payload;
peer->ip = arp1394->sip;
}
spin_unlock_irqrestore(&dev->lock, flags);
if (!peer) {
fw_notify("No peer for ARP packet from %016llx\n",
(unsigned long long)peer_guid);
goto no_peer;
}
/*
* Now that we're done with the 1394 specific stuff, we'll
* need to alter some of the data. Believe it or not, all
* that needs to be done is sender_IP_address needs to be
* moved, the destination hardware address get stuffed
* in and the hardware address length set to 8.
*
* IMPORTANT: The code below overwrites 1394 specific data
* needed above so keep the munging of the data for the
* higher level IP stack last.
*/
arp->ar_hln = 8;
/* skip over sender unique id */
arp_ptr += arp->ar_hln;
/* move sender IP addr */
put_unaligned(arp1394->sip, (u32 *)arp_ptr);
/* skip over sender IP addr */
arp_ptr += arp->ar_pln;
if (arp->ar_op == htons(ARPOP_REQUEST))
memset(arp_ptr, 0, sizeof(u64));
else
memcpy(arp_ptr, net->dev_addr, sizeof(u64));
}
/* Now add the ethernet header. */
guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? &broadcast_hw : &guid,
NULL, skb->len) >= 0) {
struct fwnet_header *eth;
u16 *rawp;
__be16 protocol;
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = (struct fwnet_header *)skb_mac_header(skb);
if (*eth->h_dest & 1) {
if (memcmp(eth->h_dest, net->broadcast,
net->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
#if 0
else
skb->pkt_type = PACKET_MULTICAST;
#endif
} else {
if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= 1536) {
protocol = eth->h_proto;
} else {
rawp = (u16 *)skb->data;
if (*rawp == 0xffff)
protocol = htons(ETH_P_802_3);
else
protocol = htons(ETH_P_802_2);
}
skb->protocol = protocol;
}
status = netif_rx(skb);
if (status == NET_RX_DROP) {
net->stats.rx_errors++;
net->stats.rx_dropped++;
} else {
net->stats.rx_packets++;
net->stats.rx_bytes += skb->len;
}
return 0;
no_peer:
net->stats.rx_errors++;
net->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int source_node_id, int generation,
bool is_broadcast)
{
struct sk_buff *skb;
struct net_device *net = dev->netdev;
struct rfc2734_header hdr;
unsigned lf;
unsigned long flags;
struct fwnet_peer *peer;
struct fwnet_partial_datagram *pd;
int fg_off;
int dg_size;
u16 datagram_label;
int retval;
u16 ether_type;
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
/*
* An unfragmented datagram has been received by the ieee1394
* bus. Build an skbuff around it so we can pass it to the
* high level network layer.
*/
ether_type = fwnet_get_hdr_ether_type(&hdr);
buf++;
len -= RFC2374_UNFRAG_HDR_SIZE;
skb = dev_alloc_skb(len + net->hard_header_len + 15);
if (unlikely(!skb)) {
fw_error("out of memory\n");
net->stats.rx_dropped++;
return -ENOMEM;
}
skb_reserve(skb, (net->hard_header_len + 15) & ~15);
memcpy(skb_put(skb, len), buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
/* A datagram fragment has been received, now the fun begins. */
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
if (lf == RFC2374_HDR_FIRSTFRAG) {
ether_type = fwnet_get_hdr_ether_type(&hdr);
fg_off = 0;
} else {
ether_type = 0;
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
if (!peer) {
retval = -ENOENT;
goto fail;
}
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
/* remove the oldest */
fwnet_pd_delete(list_first_entry(&peer->pd_list,
struct fwnet_partial_datagram, pd_link));
peer->pdg_size--;
}
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
goto fail;
}
peer->pdg_size++;
} else {
if (fwnet_frag_overlap(pd, fg_off, len) ||
pd->datagram_size != dg_size) {
/*
* Differing datagram sizes or overlapping fragments,
* discard old datagram and start a new one.
*/
fwnet_pd_delete(pd);
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
/*
* Couldn't save off fragment anyway
* so might as well obliterate the
* datagram now.
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
}
} /* new datagram or add to existing one */
if (lf == RFC2374_HDR_FIRSTFRAG)
pd->ether_type = ether_type;
if (fwnet_pd_is_complete(pd)) {
ether_type = pd->ether_type;
peer->pdg_size--;
skb = skb_get(pd->skb);
fwnet_pd_delete(pd);
spin_unlock_irqrestore(&dev->lock, flags);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
false, ether_type);
}
/*
* Datagram is not complete, we're done for the
* moment.
*/
retval = 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
if (destination == IEEE1394_ALL_NODES) {
kfree(r);
return;
}
if (offset != dev->handler.offset)
rcode = RCODE_ADDRESS_ERROR;
else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
rcode = RCODE_TYPE_ERROR;
else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
fw_error("Incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
} else
rcode = RCODE_COMPLETE;
fw_send_response(card, r, rcode);
}
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
struct fw_card *card;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
u32 length;
u16 source_node_id;
u32 specifier_id;
u32 ver;
unsigned long offset;
unsigned long flags;
dev = data;
card = dev->card;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
spin_lock_irqsave(&dev->lock, flags);
offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
dev->broadcast_rcv_next_ptr = 0;
spin_unlock_irqrestore(&dev->lock, flags);
specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
fwnet_incoming_packet(dev, buf_ptr, length,
source_node_id, -1, true);
}
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
spin_lock_irqsave(&dev->lock, flags);
retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
&dev->broadcast_rcv_buffer, offset);
spin_unlock_irqrestore(&dev->lock, flags);
if (retval >= 0)
fw_iso_context_queue_flush(dev->broadcast_rcv_context);
else
fw_error("requeue failed\n");
}
static struct kmem_cache *fwnet_packet_task_cache;
static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
{
dev_kfree_skb_any(ptask->skb);
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
/* Caller must hold dev->lock. */
static void dec_queued_datagrams(struct fwnet_device *dev)
{
if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
netif_wake_queue(dev->netdev);
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
struct sk_buff *skb = ptask->skb;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
ptask->outstanding_pkts--;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
if (free)
dec_queued_datagrams(dev);
if (ptask->outstanding_pkts == 0) {
dev->netdev->stats.tx_packets++;
dev->netdev->stats.tx_bytes += skb->len;
}
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
u16 fg_off;
u16 datagram_label;
u16 lf;
/* Update the ptask to point to the next fragment and send it */
lf = fwnet_get_hdr_lf(&ptask->hdr);
switch (lf) {
case RFC2374_HDR_LASTFRAG:
case RFC2374_HDR_UNFRAG:
default:
fw_error("Outstanding packet %x lf %x, header %x,%x\n",
ptask->outstanding_pkts, lf, ptask->hdr.w0,
ptask->hdr.w1);
BUG();
case RFC2374_HDR_FIRSTFRAG:
/* Set frag type here for future interior fragments */
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
case RFC2374_HDR_INTFRAG:
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
}
skb_pull(skb, ptask->max_payload);
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
dg_size, fg_off, datagram_label);
} else {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
dg_size, fg_off, datagram_label);
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
}
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
/* One fragment failed; don't try to send remaining fragments. */
ptask->outstanding_pkts = 0;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = ptask->enqueued;
if (free)
dec_queued_datagrams(dev);
dev->netdev->stats.tx_dropped++;
dev->netdev->stats.tx_errors++;
spin_unlock_irqrestore(&dev->lock, flags);
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_write_complete(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct fwnet_packet_task *ptask = data;
static unsigned long j;
static int last_rcode, errors_skipped;
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
fwnet_transmit_packet_failed(ptask);
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
fw_error("fwnet_write_complete: "
"failed: %x (skipped %d)\n", rcode, errors_skipped);
errors_skipped = 0;
last_rcode = rcode;
} else
errors_skipped++;
}
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev;
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
switch (fwnet_get_hdr_lf(&ptask->hdr)) {
case RFC2374_HDR_UNFRAG:
bufhdr = (struct rfc2734_header *)
skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
break;
case RFC2374_HDR_FIRSTFRAG:
case RFC2374_HDR_INTFRAG:
case RFC2374_HDR_LASTFRAG:
bufhdr = (struct rfc2734_header *)
skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
break;
default:
BUG();
}
if (ptask->dest_node == IEEE1394_ALL_NODES) {
u8 *p;
int generation;
int node_id;
/* ptask->generation may not have been set yet */
generation = dev->card->generation;
smp_rmb();
node_id = dev->card->node_id;
p = skb_push(ptask->skb, 8);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
| RFC2734_SW_VERSION, &p[4]);
/* We should not transmit if broadcast_channel.valid == 0. */
fw_send_request(dev->card, &ptask->transaction,
TCODE_STREAM_DATA,
fw_stream_packet_destination_id(3,
IEEE1394_BROADCAST_CHANNEL, 0),
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
goto out;
}
fw_send_request(dev->card, &ptask->transaction,
TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
dev->netdev->trans_start = jiffies;
out:
if (free)
fwnet_free_ptask(ptask);
return 0;
}
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
struct fw_iso_context *context;
int retval;
unsigned num_packets;
unsigned max_receive;
struct fw_iso_packet packet;
unsigned long offset;
unsigned u;
if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
/* outside OHCI posted write area? */
static const struct fw_address_region region = {
.start = 0xffff00000000ULL,
.end = CSR_REGISTER_BASE,
};
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
		retval = fw_core_add_address_handler(&dev->handler, &region);
if (retval < 0)
goto failed_initial;
dev->local_fifo = dev->handler.offset;
}
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
if (!dev->broadcast_rcv_context) {
void **ptrptr;
context = fw_iso_context_create(dev->card,
FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
if (IS_ERR(context)) {
retval = PTR_ERR(context);
goto failed_context_create;
}
retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
if (retval < 0)
goto failed_buffer_init;
ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
if (!ptrptr) {
retval = -ENOMEM;
goto failed_ptrs_alloc;
}
dev->broadcast_rcv_buffer_ptrs = ptrptr;
for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
void *ptr;
unsigned v;
ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
*ptrptr++ = (void *)
((char *)ptr + v * max_receive);
}
dev->broadcast_rcv_context = context;
} else {
context = dev->broadcast_rcv_context;
}
packet.payload_length = max_receive;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
offset = 0;
for (u = 0; u < num_packets; u++) {
retval = fw_iso_context_queue(context, &packet,
&dev->broadcast_rcv_buffer, offset);
if (retval < 0)
goto failed_rcv_queue;
offset += max_receive;
}
dev->num_broadcast_rcv_ptrs = num_packets;
dev->rcv_buffer_size = max_receive;
dev->broadcast_rcv_next_ptr = 0U;
retval = fw_iso_context_start(context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
if (retval < 0)
goto failed_rcv_queue;
/* FIXME: adjust it according to the min. speed of all known peers? */
dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
- IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
dev->broadcast_state = FWNET_BROADCAST_RUNNING;
return 0;
failed_rcv_queue:
kfree(dev->broadcast_rcv_buffer_ptrs);
dev->broadcast_rcv_buffer_ptrs = NULL;
failed_ptrs_alloc:
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
failed_buffer_init:
fw_iso_context_destroy(context);
dev->broadcast_rcv_context = NULL;
failed_context_create:
fw_core_remove_address_handler(&dev->handler);
failed_initial:
dev->local_fifo = FWNET_NO_FIFO_ADDR;
return retval;
}
static void set_carrier_state(struct fwnet_device *dev)
{
if (dev->peer_count > 1)
netif_carrier_on(dev->netdev);
else
netif_carrier_off(dev->netdev);
}
/* ifup */
static int fwnet_open(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
int ret;
if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
ret = fwnet_broadcast_start(dev);
if (ret)
return ret;
}
netif_start_queue(net);
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
/* ifdown */
static int fwnet_stop(struct net_device *net)
{
netif_stop_queue(net);
/* Deallocate iso context for use by other applications? */
return 0;
}
static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
{
struct fwnet_header hdr_buf;
struct fwnet_device *dev = netdev_priv(net);
__be16 proto;
u16 dest_node;
unsigned max_payload;
u16 dg_size;
u16 *datagram_label_ptr;
struct fwnet_packet_task *ptask;
struct fwnet_peer *peer;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* Can this happen? */
if (netif_queue_stopped(dev->netdev)) {
spin_unlock_irqrestore(&dev->lock, flags);
return NETDEV_TX_BUSY;
}
ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
if (ptask == NULL)
goto fail;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto fail;
/*
* Make a copy of the driver-specific header.
* We might need to rebuild the header on tx failure.
*/
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
skb_pull(skb, sizeof(hdr_buf));
proto = hdr_buf.h_proto;
dg_size = skb->len;
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
*/
if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
|| proto == htons(ETH_P_ARP)
|| (proto == htons(ETH_P_IP)
&& IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
max_payload = dev->broadcast_xmt_max_payload;
datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
ptask->generation = 0;
ptask->dest_node = IEEE1394_ALL_NODES;
ptask->speed = SCODE_100;
} else {
__be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
u8 generation;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
goto fail;
generation = peer->generation;
dest_node = peer->node_id;
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
ptask->fifo_addr = peer->fifo;
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
}
/* If this is an ARP packet, convert it */
if (proto == htons(ETH_P_ARP)) {
struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr = (unsigned char *)(arp + 1);
struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
__be32 ipaddr;
ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
arp1394->max_rec = dev->card->max_receive;
arp1394->sspd = dev->card->link_speed;
put_unaligned_be16(dev->local_fifo >> 32,
&arp1394->fifo_hi);
put_unaligned_be32(dev->local_fifo & 0xffffffff,
&arp1394->fifo_lo);
put_unaligned(ipaddr, &arp1394->sip);
}
ptask->hdr.w0 = 0;
ptask->hdr.w1 = 0;
ptask->skb = skb;
ptask->dev = dev;
/* Does it all fit in one packet? */
if (dg_size <= max_payload) {
fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
ptask->outstanding_pkts = 1;
max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
} else {
u16 datagram_label;
max_payload -= RFC2374_FRAG_OVERHEAD;
datagram_label = (*datagram_label_ptr)++;
fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
datagram_label);
ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
max_payload += RFC2374_FRAG_HDR_SIZE;
}
if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
netif_stop_queue(dev->netdev);
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
ptask->enqueued = 0;
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask)
kmem_cache_free(fwnet_packet_task_cache, ptask);
if (skb != NULL)
dev_kfree_skb(skb);
net->stats.tx_dropped++;
net->stats.tx_errors++;
/*
* FIXME: According to a patch from 2003-02-26, "returning non-zero
* causes serious problems" here, allegedly. Before that patch,
* -ERRNO was returned which is not appropriate under Linux 2.6.
* Perhaps more needs to be done? Stop the queue in serious
* conditions and restart it elsewhere?
*/
return NETDEV_TX_OK;
}
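/*
* Editor's note (illustrative sketch, not part of the original driver):
* a worked example for the fragmentation path above, using hypothetical
* sizes. A 3000 octet datagram on a link whose max_payload is 1024
* octets is split into
*
*	DIV_ROUND_UP(3000, 1024 - RFC2374_FRAG_OVERHEAD)
*
* fragments - three of them for any per-fragment overhead of 24 octets
* or less - and every fragment carries the datagram_label taken from
* *datagram_label_ptr so the receiver can reassemble them.
*/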
static int fwnet_change_mtu(struct net_device *net, int new_mtu)
{
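/* 68 octets is the minimum IPv4 MTU required by RFC 791, hence the lower bound below */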
if (new_mtu < 68)
return -EINVAL;
net->mtu = new_mtu;
return 0;
}
static const struct ethtool_ops fwnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
.ndo_change_mtu = fwnet_change_mtu,
};
static void fwnet_init_dev(struct net_device *net)
{
net->header_ops = &fwnet_header_ops;
net->netdev_ops = &fwnet_netdev_ops;
net->watchdog_timeo = 2 * HZ;
net->flags = IFF_BROADCAST | IFF_MULTICAST;
net->features = NETIF_F_HIGHDMA;
net->addr_len = FWNET_ALEN;
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
net->tx_queue_len = FWNET_TX_QUEUE_LEN;
net->ethtool_ops = &fwnet_ethtool_ops;
}
/* caller must hold fwnet_device_mutex */
static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
{
struct fwnet_device *dev;
list_for_each_entry(dev, &fwnet_device_list, dev_link)
if (dev->card == card)
return dev;
return NULL;
}
static int fwnet_add_peer(struct fwnet_device *dev,
struct fw_unit *unit, struct fw_device *device)
{
struct fwnet_peer *peer;
peer = kmalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
return -ENOMEM;
dev_set_drvdata(&unit->device, peer);
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
peer->fifo = FWNET_NO_FIFO_ADDR;
peer->ip = 0;
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
peer->speed = device->max_speed;
peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
peer->generation = device->generation;
smp_rmb();
peer->node_id = device->node_id;
spin_lock_irq(&dev->lock);
list_add_tail(&peer->peer_link, &dev->peer_list);
dev->peer_count++;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
static int fwnet_probe(struct device *_dev)
{
struct fw_unit *unit = fw_unit(_dev);
struct fw_device *device = fw_parent_device(unit);
struct fw_card *card = device->card;
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
unsigned max_mtu;
int ret;
mutex_lock(&fwnet_device_mutex);
dev = fwnet_dev_find(card);
if (dev) {
net = dev->netdev;
goto have_dev;
}
net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
if (net == NULL) {
ret = -ENOMEM;
goto out;
}
allocated_netdev = true;
SET_NETDEV_DEV(net, card->device);
dev = netdev_priv(net);
spin_lock_init(&dev->lock);
dev->broadcast_state = FWNET_BROADCAST_ERROR;
dev->broadcast_rcv_context = NULL;
dev->broadcast_xmt_max_payload = 0;
dev->broadcast_xmt_datagramlabel = 0;
dev->local_fifo = FWNET_NO_FIFO_ADDR;
dev->queued_datagrams = 0;
INIT_LIST_HEAD(&dev->peer_list);
dev->card = card;
dev->netdev = net;
/*
* Use the RFC 2734 default 1500 octets or the maximum payload
* as initial MTU
*/
max_mtu = (1 << (card->max_receive + 1))
- sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
net->mtu = min(1500U, max_mtu);
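/*
* Editor's note (illustrative, not part of the original driver): the card's
* max_receive field encodes the largest asynchronous payload as
* 2^(max_receive + 1) octets, so e.g. max_receive = 10 allows 2048 octet
* packets and the min() above keeps the RFC 2734 default MTU of 1500;
* only controllers with small max_receive values actually lower the MTU.
*/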
/* Set our hardware address while we're at it */
put_unaligned_be64(card->guid, net->dev_addr);
put_unaligned_be64(~0ULL, net->broadcast);
ret = register_netdev(net);
if (ret) {
fw_error("Cannot register the driver\n");
goto out;
}
list_add_tail(&dev->dev_link, &fwnet_device_list);
fw_notify("%s: IPv4 over FireWire on device %016llx\n",
net->name, (unsigned long long)card->guid);
have_dev:
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
unregister_netdev(net);
list_del(&dev->dev_link);
}
out:
if (ret && allocated_netdev)
free_netdev(net);
mutex_unlock(&fwnet_device_mutex);
return ret;
}
static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
struct fwnet_partial_datagram *pd, *pd_next;
spin_lock_irq(&dev->lock);
list_del(&peer->peer_link);
dev->peer_count--;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
fwnet_pd_delete(pd);
kfree(peer);
}
static int fwnet_remove(struct device *_dev)
{
struct fwnet_peer *peer = dev_get_drvdata(_dev);
struct fwnet_device *dev = peer->dev;
struct net_device *net;
int i;
mutex_lock(&fwnet_device_mutex);
net = dev->netdev;
if (net && peer->ip)
arp_invalidate(net, peer->ip);
fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
unregister_netdev(net);
if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
fw_core_remove_address_handler(&dev->handler);
if (dev->broadcast_rcv_context) {
fw_iso_context_stop(dev->broadcast_rcv_context);
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
dev->card);
fw_iso_context_destroy(dev->broadcast_rcv_context);
}
for (i = 0; dev->queued_datagrams && i < 5; i++)
ssleep(1);
WARN_ON(dev->queued_datagrams);
list_del(&dev->dev_link);
free_netdev(net);
}
mutex_unlock(&fwnet_device_mutex);
return 0;
}
/*
* FIXME abort partially sent fragmented datagrams,
* discard partially received fragmented datagrams
*/
static void fwnet_update(struct fw_unit *unit)
{
struct fw_device *device = fw_parent_device(unit);
struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
int generation;
generation = device->generation;
spin_lock_irq(&peer->dev->lock);
peer->node_id = device->node_id;
peer->generation = generation;
spin_unlock_irq(&peer->dev->lock);
}
static const struct ieee1394_device_id fwnet_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC2734_SW_VERSION,
},
{ }
};
static struct fw_driver fwnet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "net",
.bus = &fw_bus_type,
.probe = fwnet_probe,
.remove = fwnet_remove,
},
.update = fwnet_update,
.id_table = fwnet_id_table,
};
static const u32 rfc2374_unit_directory_data[] = {
0x00040000, /* directory_length */
0x1200005e, /* unit_specifier_id: IANA */
0x81000003, /* textual descriptor offset */
0x13000001, /* unit_sw_version: RFC 2734 */
0x81000005, /* textual descriptor offset */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49414e41, /* I A N A */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49507634, /* I P v 4 */
};
static struct fw_descriptor rfc2374_unit_directory = {
.length = ARRAY_SIZE(rfc2374_unit_directory_data),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = rfc2374_unit_directory_data
};
static int __init fwnet_init(void)
{
int err;
err = fw_core_add_descriptor(&rfc2374_unit_directory);
if (err)
return err;
fwnet_packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct fwnet_packet_task), 0, 0, NULL);
if (!fwnet_packet_task_cache) {
err = -ENOMEM;
goto out;
}
err = driver_register(&fwnet_driver.driver);
if (!err)
return 0;
kmem_cache_destroy(fwnet_packet_task_cache);
out:
fw_core_remove_descriptor(&rfc2374_unit_directory);
return err;
}
module_init(fwnet_init);
static void __exit fwnet_cleanup(void)
{
driver_unregister(&fwnet_driver.driver);
kmem_cache_destroy(fwnet_packet_task_cache);
fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);
MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
| gpl-2.0 |
IntelBUAP/Repo-Linux-RT | arch/powerpc/kernel/udbg_16550.c | 1789 | 6573 | /*
* udbg for NS16550 compatible serial ports
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/reg_a2.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
extern u8 real_205_readb(volatile u8 __iomem *addr);
extern void real_205_writeb(u8 data, volatile u8 __iomem *addr);
#define UART_RBR 0
#define UART_IER 1
#define UART_FCR 2
#define UART_LCR 3
#define UART_MCR 4
#define UART_LSR 5
#define UART_MSR 6
#define UART_SCR 7
#define UART_THR UART_RBR
#define UART_IIR UART_FCR
#define UART_DLL UART_RBR
#define UART_DLM UART_IER
#define UART_DLAB UART_LCR
#define LSR_DR 0x01 /* Data ready */
#define LSR_OE 0x02 /* Overrun */
#define LSR_PE 0x04 /* Parity error */
#define LSR_FE 0x08 /* Framing error */
#define LSR_BI 0x10 /* Break */
#define LSR_THRE 0x20 /* Xmit holding register empty */
#define LSR_TEMT 0x40 /* Xmitter empty */
#define LSR_ERR 0x80 /* Error */
#define LCR_DLAB 0x80
static u8 (*udbg_uart_in)(unsigned int reg);
static void (*udbg_uart_out)(unsigned int reg, u8 data);
static void udbg_uart_flush(void)
{
if (!udbg_uart_in)
return;
/* wait for idle */
while ((udbg_uart_in(UART_LSR) & LSR_THRE) == 0)
cpu_relax();
}
static void udbg_uart_putc(char c)
{
if (!udbg_uart_out)
return;
if (c == '\n')
udbg_uart_putc('\r');
udbg_uart_flush();
udbg_uart_out(UART_THR, c);
}
static int udbg_uart_getc_poll(void)
{
if (!udbg_uart_in)
return -1;
if (udbg_uart_in(UART_LSR) & LSR_DR)
return udbg_uart_in(UART_RBR);
return -1;
}
static int udbg_uart_getc(void)
{
if (!udbg_uart_in)
return -1;
/* wait for char */
while (!(udbg_uart_in(UART_LSR) & LSR_DR))
cpu_relax();
return udbg_uart_in(UART_RBR);
}
static void udbg_use_uart(void)
{
udbg_putc = udbg_uart_putc;
udbg_flush = udbg_uart_flush;
udbg_getc = udbg_uart_getc;
udbg_getc_poll = udbg_uart_getc_poll;
}
void udbg_uart_setup(unsigned int speed, unsigned int clock)
{
unsigned int dll, base_bauds;
if (!udbg_uart_out)
return;
if (clock == 0)
clock = 1843200;
if (speed == 0)
speed = 9600;
base_bauds = clock / 16;
dll = base_bauds / speed;
udbg_uart_out(UART_LCR, 0x00);
udbg_uart_out(UART_IER, 0xff);
udbg_uart_out(UART_IER, 0x00);
udbg_uart_out(UART_LCR, LCR_DLAB);
udbg_uart_out(UART_DLL, dll & 0xff);
udbg_uart_out(UART_DLM, dll >> 8);
/* 8 data, 1 stop, no parity */
udbg_uart_out(UART_LCR, 0x3);
/* RTS/DTR */
udbg_uart_out(UART_MCR, 0x3);
/* Clear & enable FIFOs */
udbg_uart_out(UART_FCR, 0x7);
}
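/*
* Editor's note (worked example, not in the original source): with the
* default 1843200 Hz clock, base_bauds = 1843200 / 16 = 115200, so a
* 115200 baud console programs dll = 1 and a 9600 baud console dll = 12.
*/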
unsigned int udbg_probe_uart_speed(unsigned int clock)
{
unsigned int dll, dlm, divisor, prescaler, speed;
u8 old_lcr;
old_lcr = udbg_uart_in(UART_LCR);
/* select divisor latch registers. */
udbg_uart_out(UART_LCR, old_lcr | LCR_DLAB);
/* now, read the divisor */
dll = udbg_uart_in(UART_DLL);
dlm = udbg_uart_in(UART_DLM);
divisor = dlm << 8 | dll;
/* check prescaling */
if (udbg_uart_in(UART_MCR) & 0x80)
prescaler = 4;
else
prescaler = 1;
/* restore the LCR */
udbg_uart_out(UART_LCR, old_lcr);
/* calculate speed */
speed = (clock / prescaler) / (divisor * 16);
/* sanity check */
if (speed > (clock / 16))
speed = 9600;
return speed;
}
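/*
* Editor's note (worked example, not in the original source): reading back
* dll = 12, dlm = 0 with no prescaling on a 1843200 Hz clock gives
* speed = 1843200 / (12 * 16) = 9600 baud.
*/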
static union {
unsigned char __iomem *mmio_base;
unsigned long pio_base;
} udbg_uart;
static unsigned int udbg_uart_stride = 1;
static u8 udbg_uart_in_pio(unsigned int reg)
{
return inb(udbg_uart.pio_base + (reg * udbg_uart_stride));
}
static void udbg_uart_out_pio(unsigned int reg, u8 data)
{
outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
}
void udbg_uart_init_pio(unsigned long port, unsigned int stride)
{
if (!port)
return;
udbg_uart.pio_base = port;
udbg_uart_stride = stride;
udbg_uart_in = udbg_uart_in_pio;
udbg_uart_out = udbg_uart_out_pio;
udbg_use_uart();
}
static u8 udbg_uart_in_mmio(unsigned int reg)
{
return in_8(udbg_uart.mmio_base + (reg * udbg_uart_stride));
}
static void udbg_uart_out_mmio(unsigned int reg, u8 data)
{
out_8(udbg_uart.mmio_base + (reg * udbg_uart_stride), data);
}
void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
{
if (!addr)
return;
udbg_uart.mmio_base = addr;
udbg_uart_stride = stride;
udbg_uart_in = udbg_uart_in_mmio;
udbg_uart_out = udbg_uart_out_mmio;
udbg_use_uart();
}
#ifdef CONFIG_PPC_MAPLE
#define UDBG_UART_MAPLE_ADDR ((void __iomem *)0xf40003f8)
static u8 udbg_uart_in_maple(unsigned int reg)
{
return real_readb(UDBG_UART_MAPLE_ADDR + reg);
}
static void udbg_uart_out_maple(unsigned int reg, u8 val)
{
real_writeb(val, UDBG_UART_MAPLE_ADDR + reg);
}
void __init udbg_init_maple_realmode(void)
{
udbg_uart_in = udbg_uart_in_maple;
udbg_uart_out = udbg_uart_out_maple;
udbg_use_uart();
}
#endif /* CONFIG_PPC_MAPLE */
#ifdef CONFIG_PPC_PASEMI
#define UDBG_UART_PAS_ADDR ((void __iomem *)0xfcff03f8UL)
static u8 udbg_uart_in_pas(unsigned int reg)
{
return real_205_readb(UDBG_UART_PAS_ADDR + reg);
}
static void udbg_uart_out_pas(unsigned int reg, u8 val)
{
real_205_writeb(val, UDBG_UART_PAS_ADDR + reg);
}
void __init udbg_init_pas_realmode(void)
{
udbg_uart_in = udbg_uart_in_pas;
udbg_uart_out = udbg_uart_out_pas;
udbg_use_uart();
}
#endif /* CONFIG_PPC_PASEMI */
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
static u8 udbg_uart_in_44x_as1(unsigned int reg)
{
return as1_readb((void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
}
static void udbg_uart_out_44x_as1(unsigned int reg, u8 val)
{
as1_writeb(val, (void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
}
void __init udbg_init_44x_as1(void)
{
udbg_uart_in = udbg_uart_in_44x_as1;
udbg_uart_out = udbg_uart_out_44x_as1;
udbg_use_uart();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
#ifdef CONFIG_PPC_EARLY_DEBUG_40x
static u8 udbg_uart_in_40x(unsigned int reg)
{
return real_readb((void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+ reg);
}
static void udbg_uart_out_40x(unsigned int reg, u8 val)
{
real_writeb(val, (void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
+ reg);
}
void __init udbg_init_40x_realmode(void)
{
udbg_uart_in = udbg_uart_in_40x;
udbg_uart_out = udbg_uart_out_40x;
udbg_use_uart();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
| gpl-2.0 |
Ca1ne/Enoch | drivers/acpi/processor_driver.c | 2045 | 22112 | /*
* acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
*
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* - Added processor hotplug support
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* TBD:
* 1. Make # power states dynamic.
* 2. Support duty_cycle values that span bit 4.
* 3. Optimize by having scheduler determine busyness instead of
* having us try to calculate it here.
* 4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
#define PREFIX "ACPI: "
#define ACPI_PROCESSOR_CLASS "processor"
#define ACPI_PROCESSOR_DEVICE_NAME "Processor"
#define ACPI_PROCESSOR_FILE_INFO "info"
#define ACPI_PROCESSOR_FILE_THROTTLING "throttling"
#define ACPI_PROCESSOR_FILE_LIMIT "limit"
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
#define ACPI_PROCESSOR_LIMIT_USER 0
#define ACPI_PROCESSOR_LIMIT_THERMAL 1
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_driver");
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
{"ACPI0007", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
.ids = processor_device_ids,
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
.suspend = acpi_processor_suspend,
.resume = acpi_processor_resume,
.notify = acpi_processor_notify,
},
};
#define INSTALL_NOTIFY_HANDLER 1
#define UNINSTALL_NOTIFY_HANDLER 2
DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);
struct acpi_processor_errata errata __read_mostly;
/* --------------------------------------------------------------------------
Errata Handling
-------------------------------------------------------------------------- */
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
u8 value1 = 0;
u8 value2 = 0;
if (!dev)
return -EINVAL;
/*
* Note that 'dev' references the PIIX4 ACPI Controller.
*/
switch (dev->revision) {
case 0:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
break;
case 1:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
break;
case 2:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
break;
case 3:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
break;
}
switch (dev->revision) {
case 0: /* PIIX4 A-step */
case 1: /* PIIX4 B-step */
/*
* See specification changes #13 ("Manual Throttle Duty Cycle")
* and #14 ("Enabling and Disabling Manual Throttle"), plus
* erratum #5 ("STPCLK# Deassertion Time") from the January
* 2002 PIIX4 specification update. Applies to only older
* PIIX4 models.
*/
errata.piix4.throttle = 1;
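/* fall through - the BM-IDE and Type-F DMA errata below apply to all steppings */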
case 2: /* PIIX4E */
case 3: /* PIIX4M */
/*
* See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
* Livelock") from the January 2002 PIIX4 specification update.
* Applies to all PIIX4 models.
*/
/*
* BM-IDE
* ------
* Find the PIIX4 IDE Controller and get the Bus Master IDE
* Status register address. We'll use this later to read
* each IDE controller's DMA status to make sure we catch all
* DMA activity.
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (dev) {
errata.piix4.bmisx = pci_resource_start(dev, 4);
pci_dev_put(dev);
}
/*
* Type-F DMA
* ----------
* Find the PIIX4 ISA Controller and read the Motherboard
* DMA controller's status to see if Type-F (Fast) DMA mode
* is enabled (bit 7) on either channel. Note that we'll
* disable C3 support if this is enabled, as some legacy
* devices won't operate well if fast DMA is disabled.
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0,
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (dev) {
pci_read_config_byte(dev, 0x76, &value1);
pci_read_config_byte(dev, 0x77, &value2);
if ((value1 & 0x80) || (value2 & 0x80))
errata.piix4.fdma = 1;
pci_dev_put(dev);
}
break;
}
if (errata.piix4.bmisx)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Bus master activity detection (BM-IDE) erratum enabled\n"));
if (errata.piix4.fdma)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Type-F DMA livelock erratum (C3 disabled)\n"));
return 0;
}
static int acpi_processor_errata(struct acpi_processor *pr)
{
int result = 0;
struct pci_dev *dev = NULL;
if (!pr)
return -EINVAL;
/*
* PIIX4
*/
dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
PCI_ANY_ID, NULL);
if (dev) {
result = acpi_processor_errata_piix4(dev);
pci_dev_put(dev);
}
return result;
}
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
static int acpi_processor_get_info(struct acpi_device *device)
{
acpi_status status = 0;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr;
int cpu_index, device_declaration = 0;
static int cpu0_initialized;
pr = acpi_driver_data(device);
if (!pr)
return -EINVAL;
if (num_online_cpus() > 1)
errata.smp = TRUE;
acpi_processor_errata(pr);
/*
* Check to see if we have bus mastering arbitration control. This
* is required for proper C3 usage (to maintain cache coherency).
*/
if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
pr->flags.bm_control = 1;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Bus mastering arbitration control present\n"));
} else
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No bus mastering arbitration control\n"));
if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
/* Declared with "Processor" statement; match ProcessorID */
status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Evaluating processor object\n");
return -ENODEV;
}
/*
* TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
* >>> 'acpi_get_processor_id(acpi_id, &id)' in
* arch/xxx/acpi.c
*/
pr->acpi_id = object.processor.proc_id;
} else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
*/
unsigned long long value;
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
"Evaluating processor _UID [%#x]\n", status);
return -ENODEV;
}
device_declaration = 1;
pr->acpi_id = value;
}
cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == -1) &&
(num_online_cpus() == 1)) {
cpu_index = 0;
}
cpu0_initialized = 1;
pr->id = cpu_index;
/*
* Extra Processor objects may be enumerated on MP systems with
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*/
if (pr->id == -1) {
if (ACPI_FAILURE
(acpi_processor_hotadd_init(pr->handle, &pr->id))) {
return -ENODEV;
}
}
/*
* On some boxes several processors use the same processor bus id.
* But they are located in different scope. For example:
* \_SB.SCK0.CPU0
* \_SB.SCK1.CPU0
* Rename the processor device bus id. And the new bus id will be
* generated as the following format:
* CPU+CPU ID.
*/
sprintf(acpi_device_bid(device), "CPU%X", pr->id);
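/* e.g. pr->id 10 yields the bus id "CPUA", since %X prints uppercase hex */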
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
pr->acpi_id));
if (!object.processor.pblk_address)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
else if (object.processor.pblk_length != 6)
printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
object.processor.pblk_length);
else {
pr->throttling.address = object.processor.pblk_address;
pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
/*
* We don't care about error returns - we just try to mark
* these reserved so that nobody else is confused into thinking
* that this region might be unused..
*
* (In particular, allocating the IO range for Cardbus)
*/
request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}
/*
* If ACPI describes a slot number for this CPU, we can use it
* ensure we get the right value in the "physical id" field
* of /proc/cpuinfo
*/
status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
if (ACPI_SUCCESS(status))
arch_fix_phys_package_id(pr->id, object.integer.value);
return 0;
}
static DEFINE_PER_CPU(void *, processor_device_array);
static void acpi_processor_notify(struct acpi_device *device, u32 event)
{
struct acpi_processor *pr = acpi_driver_data(device);
int saved;
if (!pr)
return;
switch (event) {
case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
saved = pr->performance_platform_limit;
acpi_processor_ppc_has_changed(pr, 1);
if (saved == pr->performance_platform_limit)
break;
acpi_bus_generate_proc_event(device, event,
pr->performance_platform_limit);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
pr->performance_platform_limit);
break;
case ACPI_PROCESSOR_NOTIFY_POWER:
acpi_processor_cst_has_changed(pr);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_PROCESSOR_NOTIFY_THROTTLING:
acpi_processor_tstate_has_changed(pr);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
break;
}
return;
}
static int acpi_cpu_soft_notify(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct acpi_processor *pr = per_cpu(processors, cpu);
if (action == CPU_ONLINE && pr) {
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_cst_has_changed(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
if (action == CPU_DEAD && pr) {
/* invalidate the flag.throttling after one CPU is offline */
acpi_processor_reevaluate_tstate(pr, action);
}
return NOTIFY_OK;
}
static struct notifier_block acpi_cpu_notifier =
{
.notifier_call = acpi_cpu_soft_notify,
};
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
int result = 0;
struct sys_device *sysdev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
return -ENOMEM;
if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
kfree(pr);
return -ENOMEM;
}
pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
device->driver_data = pr;
result = acpi_processor_get_info(device);
if (result) {
/* Processor is physically not present */
return 0;
}
#ifdef CONFIG_SMP
if (pr->id >= setup_max_cpus && pr->id != 0)
return 0;
#endif
BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
/*
* Buggy BIOS check
* ACPI id of processors can be reported wrongly by the BIOS.
* Don't trust it blindly
*/
if (per_cpu(processor_device_array, pr->id) != NULL &&
per_cpu(processor_device_array, pr->id) != device) {
printk(KERN_WARNING "BIOS reported wrong ACPI id "
"for the processor\n");
result = -ENODEV;
goto err_free_cpumask;
}
per_cpu(processor_device_array, pr->id) = device;
per_cpu(processors, pr->id) = pr;
sysdev = get_cpu_sysdev(pr->id);
if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
result = -EFAULT;
goto err_free_cpumask;
}
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr, 0);
#endif
acpi_processor_get_throttling_info(pr);
acpi_processor_get_limit_info(pr);
if (cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
&processor_cooling_ops);
if (IS_ERR(pr->cdev)) {
result = PTR_ERR(pr->cdev);
goto err_power_exit;
}
dev_dbg(&device->dev, "registered as cooling_device%d\n",
pr->cdev->id);
result = sysfs_create_link(&device->dev.kobj,
&pr->cdev->device.kobj,
"thermal_cooling");
if (result) {
printk(KERN_ERR PREFIX "Create sysfs link\n");
goto err_thermal_unregister;
}
result = sysfs_create_link(&pr->cdev->device.kobj,
&device->dev.kobj,
"device");
if (result) {
printk(KERN_ERR PREFIX "Create sysfs link\n");
goto err_remove_sysfs;
}
return 0;
err_remove_sysfs:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
thermal_cooling_device_unregister(pr->cdev);
err_power_exit:
acpi_processor_power_exit(pr, device);
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
return result;
}
static int acpi_processor_remove(struct acpi_device *device, int type)
{
struct acpi_processor *pr = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
pr = acpi_driver_data(device);
if (pr->id >= nr_cpu_ids)
goto free;
if (type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr))
return -EINVAL;
}
acpi_processor_power_exit(pr, device);
sysfs_remove_link(&device->dev.kobj, "sysdev");
if (pr->cdev) {
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
sysfs_remove_link(&pr->cdev->device.kobj, "device");
thermal_cooling_device_unregister(pr->cdev);
pr->cdev = NULL;
}
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
free:
free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
return 0;
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/****************************************************************************
* Acpi processor hotplug support *
****************************************************************************/
static int is_processor_present(acpi_handle handle)
{
acpi_status status;
unsigned long long sta = 0;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
return 1;
/*
* _STA is mandatory for a processor that supports hot plug
*/
if (status == AE_NOT_FOUND)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Processor does not support hot plug\n"));
else
ACPI_EXCEPTION((AE_INFO, status,
"Processor Device is not present"));
return 0;
}
static
int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
{
acpi_handle phandle;
struct acpi_device *pdev;
if (acpi_get_parent(handle, &phandle)) {
return -ENODEV;
}
if (acpi_bus_get_device(phandle, &pdev)) {
return -ENODEV;
}
if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
return -ENODEV;
}
return 0;
}
static void acpi_processor_hotplug_notify(acpi_handle handle,
u32 event, void *data)
{
struct acpi_processor *pr;
struct acpi_device *device = NULL;
int result;
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Processor driver received %s event\n",
(event == ACPI_NOTIFY_BUS_CHECK) ?
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
if (!is_processor_present(handle))
break;
if (acpi_bus_get_device(handle, &device)) {
result = acpi_processor_device_add(handle, &device);
if (result)
printk(KERN_ERR PREFIX
"Unable to add the device\n");
break;
}
break;
case ACPI_NOTIFY_EJECT_REQUEST:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"received ACPI_NOTIFY_EJECT_REQUEST\n"));
if (acpi_bus_get_device(handle, &device)) {
printk(KERN_ERR PREFIX
"Device don't exist, dropping EJECT\n");
break;
}
pr = acpi_driver_data(device);
if (!pr) {
printk(KERN_ERR PREFIX
"Driver data is NULL, dropping EJECT\n");
return;
}
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
break;
}
return;
}
static acpi_status
processor_walk_namespace_cb(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_status status;
int *action = context;
acpi_object_type type = 0;
status = acpi_get_type(handle, &type);
if (ACPI_FAILURE(status))
return (AE_OK);
if (type != ACPI_TYPE_PROCESSOR)
return (AE_OK);
switch (*action) {
case INSTALL_NOTIFY_HANDLER:
acpi_install_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
acpi_processor_hotplug_notify,
NULL);
break;
case UNINSTALL_NOTIFY_HANDLER:
acpi_remove_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
acpi_processor_hotplug_notify);
break;
default:
break;
}
return (AE_OK);
}
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
if (!is_processor_present(handle)) {
return AE_ERROR;
}
if (acpi_map_lsapic(handle, p_cpu))
return AE_ERROR;
if (arch_register_cpu(*p_cpu)) {
acpi_unmap_lsapic(*p_cpu);
return AE_ERROR;
}
return AE_OK;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
if (cpu_online(pr->id))
cpu_down(pr->id);
arch_unregister_cpu(pr->id);
acpi_unmap_lsapic(pr->id);
return (0);
}
#else
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
{
return AE_ERROR;
}
static int acpi_processor_handle_eject(struct acpi_processor *pr)
{
return (-EINVAL);
}
#endif
static
void acpi_processor_install_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int action = INSTALL_NOTIFY_HANDLER;
acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
processor_walk_namespace_cb, NULL, &action, NULL);
#endif
register_hotcpu_notifier(&acpi_cpu_notifier);
}
static
void acpi_processor_uninstall_hotplug_notify(void)
{
#ifdef CONFIG_ACPI_HOTPLUG_CPU
int action = UNINSTALL_NOTIFY_HANDLER;
acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
processor_walk_namespace_cb, NULL, &action, NULL);
#endif
unregister_hotcpu_notifier(&acpi_cpu_notifier);
}
/*
* We keep the driver loaded even when ACPI is not running.
* This is needed for the powernow-k8 driver, that works even without
* ACPI, but needs symbols from this driver
*/
static int __init acpi_processor_init(void)
{
int result = 0;
if (acpi_disabled)
return 0;
memset(&errata, 0, sizeof(errata));
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
} else {
printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
cpuidle_get_driver()->name);
}
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
goto out_cpuidle;
acpi_processor_install_hotplug_notify();
acpi_thermal_cpufreq_init();
acpi_processor_ppc_init();
acpi_processor_throttling_init();
return 0;
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
return result;
}
static void __exit acpi_processor_exit(void)
{
if (acpi_disabled)
return;
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
acpi_processor_uninstall_hotplug_notify();
acpi_bus_unregister_driver(&acpi_processor_driver);
cpuidle_unregister_driver(&acpi_idle_driver);
return;
}
module_init(acpi_processor_init);
module_exit(acpi_processor_exit);
MODULE_ALIAS("processor");
| gpl-2.0 |
MoKee/android_kernel_cyanogen_msm8916-amss | arch/arm/mach-shmobile/board-kzm9g-reference.c | 2045 | 3582 | /*
* KZM-A9-GT board support - Reference Device Tree Implementation
*
* Copyright (C) 2012 Horms Solutions Ltd.
*
* Based on board-kzm9g.c
* Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/input.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
static unsigned long pin_pullup_conf[] = {
PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
};
static const struct pinctrl_map kzm_pinctrl_map[] = {
PIN_MAP_MUX_GROUP_DEFAULT("e6826000.i2c", "pfc-sh73a0",
"i2c3_1", "i2c3"),
/* MMCIF */
PIN_MAP_MUX_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_data8_0", "mmc0"),
PIN_MAP_MUX_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_ctrl_0", "mmc0"),
PIN_MAP_CONFIGS_PIN_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"PORT279", pin_pullup_conf),
PIN_MAP_CONFIGS_GROUP_DEFAULT("e6bd0000.mmcif", "pfc-sh73a0",
"mmc0_data8_0", pin_pullup_conf),
/* SCIFA4 */
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_data", "scifa4"),
PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
"scifa4_ctrl", "scifa4"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_data4", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_ctrl", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_cd", "sdhi0"),
PIN_MAP_MUX_GROUP_DEFAULT("ee100000.sdhi", "pfc-sh73a0",
"sdhi0_wp", "sdhi0"),
/* SDHI2 */
PIN_MAP_MUX_GROUP_DEFAULT("ee140000.sdhi", "pfc-sh73a0",
"sdhi2_data4", "sdhi2"),
PIN_MAP_MUX_GROUP_DEFAULT("ee140000.sdhi", "pfc-sh73a0",
"sdhi2_ctrl", "sdhi2"),
};
static void __init kzm_init(void)
{
sh73a0_add_standard_devices_dt();
pinctrl_register_mappings(kzm_pinctrl_map, ARRAY_SIZE(kzm_pinctrl_map));
sh73a0_pinmux_init();
/* enable SD */
gpio_request(GPIO_FN_SDHI0_VCCQ_MC0_ON, NULL);
gpio_request_one(15, GPIOF_OUT_INIT_HIGH, NULL); /* power */
gpio_request_one(14, GPIOF_OUT_INIT_HIGH, NULL); /* power */
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*8way */
l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
#endif
}
static const char *kzm9g_boards_compat_dt[] __initdata = {
"renesas,kzm9g-reference",
NULL,
};
DT_MACHINE_START(KZM9G_DT, "kzm9g-reference")
.smp = smp_ops(sh73a0_smp_ops),
.map_io = sh73a0_map_io,
.init_early = sh73a0_init_delay,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = irqchip_init,
.init_machine = kzm_init,
.init_time = shmobile_timer_init,
.dt_compat = kzm9g_boards_compat_dt,
MACHINE_END
| gpl-2.0 |
hurrian/kernel_samsung_trelte | arch/tile/gxio/usb_host.c | 2301 | 2064 | /*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/*
*
* Implementation of USB gxio calls.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_usb_host.h>
#include <gxio/kiorpc.h>
#include <gxio/usb_host.h>
int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
int is_ehci)
{
char file[32];
int fd;
if (is_ehci)
snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci",
usb_index);
else
snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci",
usb_index);
fd = hv_dev_open((HV_VirtAddr) file, 0);
if (fd < 0) {
if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
return fd;
else
return -ENODEV;
}
context->fd = fd;
// Map in the MMIO space.
context->mmio_base =
(void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE);
if (context->mmio_base == NULL) {
hv_dev_close(context->fd);
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL_GPL(gxio_usb_host_init);
int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
{
iounmap((void __force __iomem *)(context->mmio_base));
hv_dev_close(context->fd);
context->mmio_base = NULL;
context->fd = -1;
return 0;
}
EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context)
{
return context->mmio_base;
}
EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context)
{
return HV_USB_HOST_MMIO_SIZE;
}
EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
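/*
* Editor's sketch (not part of the original file): a minimal, hypothetical
* use of the accessors above for a single OHCI controller at index 0.
* Kept under #if 0 because it is purely illustrative.
*/
#if 0
static int example_map_usb0(void)
{
gxio_usb_host_context_t ctx;
int err = gxio_usb_host_init(&ctx, 0, 0);	/* index 0, not EHCI */
if (err < 0)
return err;
pr_info("usb0 regs at %p, len %zu\n",
gxio_usb_host_get_reg_start(&ctx),
gxio_usb_host_get_reg_len(&ctx));
return gxio_usb_host_destroy(&ctx);
}
#endif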
| gpl-2.0 |
omerjerk/android_kernel_xiaomi_ferrari | arch/powerpc/sysdev/xics/xics-common.c | 2301 | 10315 | /*
* Copyright 2011 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/firmware.h>
/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;
unsigned int xics_default_server = 0xff;
unsigned int xics_default_distrib_server = 0;
unsigned int xics_interrupt_server_size = 8;
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
struct irq_domain *xics_host;
static LIST_HEAD(ics_list);
void xics_update_irq_servers(void)
{
int i, j;
struct device_node *np;
u32 ilen;
const u32 *ireg;
u32 hcpuid;
/* Find the server numbers for the boot cpu. */
np = of_get_cpu_node(boot_cpuid, NULL);
BUG_ON(!np);
hcpuid = get_hard_smp_processor_id(boot_cpuid);
xics_default_server = xics_default_distrib_server = hcpuid;
pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
if (!ireg) {
of_node_put(np);
return;
}
i = ilen / sizeof(int);
/* Global interrupt distribution server is specified in the last
* entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
* entry from this property for the current boot cpu id and use it as
* default distribution server
*/
for (j = 0; j < i; j += 2) {
if (ireg[j] == hcpuid) {
xics_default_distrib_server = ireg[j+1];
break;
}
}
pr_devel("xics: xics_default_distrib_server = 0x%x\n",
xics_default_distrib_server);
of_node_put(np);
}
/* GIQ stuff, currently only supported on RTAS setups, will have
* to be sorted properly for bare metal
*/
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
int index;
int status;
if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
return;
index = (1UL << xics_interrupt_server_size) - 1 - gserver;
status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif
}
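/*
* Editor's note (worked example, not in the original source): with the
* usual 8 bit interrupt server size the indicator index is simply
* 255 - gserver, so joining the queue as global server 0 uses index 255.
*/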
void xics_setup_cpu(void)
{
icp_ops->set_priority(LOWEST_PRIORITY);
xics_set_cpu_giq(xics_default_distrib_server, 1);
}
void xics_mask_unknown_vec(unsigned int vec)
{
struct ics *ics;
pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
list_for_each_entry(ics, &ics_list, link)
ics->mask_unknown(ics, vec);
}
#ifdef CONFIG_SMP
static void xics_request_ipi(void)
{
unsigned int ipi;
ipi = irq_create_mapping(xics_host, XICS_IPI);
BUG_ON(ipi == NO_IRQ);
/*
* IPIs are marked IRQF_PERCPU. The handler was set in map.
*/
BUG_ON(request_irq(ipi, icp_ops->ipi_action,
IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
int __init xics_smp_probe(void)
{
/* Setup cause_ipi callback based on which ICP is used */
smp_ops->cause_ipi = icp_ops->cause_ipi;
/* Register all the IPIs */
xics_request_ipi();
return cpumask_weight(cpu_possible_mask);
}
#endif /* CONFIG_SMP */
void xics_teardown_cpu(void)
{
struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
/*
* we have to reset the cppr index to 0 because we're
* not going to return from the IPI
*/
os_cppr->index = 0;
icp_ops->set_priority(0);
icp_ops->teardown_cpu();
}
void xics_kexec_teardown_cpu(int secondary)
{
xics_teardown_cpu();
icp_ops->flush_ipi();
/*
* Some machines need to have at least one cpu in the GIQ,
* so leave the master cpu in the group.
*/
if (secondary)
xics_set_cpu_giq(xics_default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU
/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
struct irq_desc *desc;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
xics_update_irq_servers();
/* Reject any interrupt that was queued to us... */
icp_ops->set_priority(0);
/* Remove ourselves from the global interrupt queue */
xics_set_cpu_giq(xics_default_distrib_server, 0);
/* Allow IPIs again... */
icp_ops->set_priority(DEFAULT_PRIORITY);
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
struct ics *ics;
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
continue;
if (desc->irq_data.domain != xics_host)
continue;
irq = desc->irq_data.hwirq;
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
chip = irq_desc_get_chip(desc);
if (!chip || !chip->irq_set_affinity)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
/* Locate interrupt server */
server = -1;
ics = irq_get_chip_data(virq);
if (ics)
server = ics->get_server(ics, irq);
if (server < 0) {
printk(KERN_ERR "%s: Can't find server for irq %d\n",
__func__, irq);
goto unlock;
}
/* We only support delivery to all cpus or to one cpu.
* The irq has to be migrated only in the single cpu
* case.
*/
if (server != hw_cpu)
goto unlock;
/* This is expected during cpu offline. */
if (cpu_online(cpu))
pr_warning("IRQ %u affinity broken off cpu %u\n",
virq, cpu);
/* Reset affinity to all cpus */
raw_spin_unlock_irqrestore(&desc->lock, flags);
irq_set_affinity(virq, cpu_all_mask);
continue;
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_SMP
/*
* For the moment we only implement delivery to all cpus or one cpu.
*
* If the requested affinity is cpu_all_mask, we set global affinity.
* If not we set it to the first cpu in the mask, even if multiple cpus
* are set. This is so things like irqbalance (which set core and package
* wide affinities) do the right thing.
*
* We need to fix this to implement support for the links
*/
int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check)
{
if (!distribute_irqs)
return xics_default_server;
if (!cpumask_subset(cpu_possible_mask, cpumask)) {
int server = cpumask_first_and(cpu_online_mask, cpumask);
if (server < nr_cpu_ids)
return get_hard_smp_processor_id(server);
if (strict_check)
return -1;
}
/*
* Workaround issue with some versions of JS20 firmware that
* deliver interrupts to cpus which haven't been started. This
* happens when using the maxcpus= boot option.
*/
if (cpumask_equal(cpu_online_mask, cpu_present_mask))
return xics_default_distrib_server;
return xics_default_server;
}
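/*
* Editor's note (illustrative, not in the original source): with
* distribute_irqs set, an affinity mask naming only CPUs 2 and 5 returns
* the hard SMP id of CPU 2 (the first online CPU in the mask), while a
* mask covering every possible CPU falls through to the global
* distribution server (or the default server if some present CPUs are
* still offline).
*/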
#endif /* CONFIG_SMP */
static int xics_host_match(struct irq_domain *h, struct device_node *node)
{
struct ics *ics;
list_for_each_entry(ics, &ics_list, link)
if (ics->host_match(ics, node))
return 1;
return 0;
}
/* Dummies */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }
static struct irq_chip xics_ipi_chip = {
.name = "XICS",
.irq_eoi = NULL, /* Patched at init time */
.irq_mask = xics_ipi_mask,
.irq_unmask = xics_ipi_unmask,
};
static int xics_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ics *ics;
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
/* They aren't all level sensitive but we just don't really know */
irq_set_status_flags(virq, IRQ_LEVEL);
/* Don't call into ICS for IPIs */
if (hw == XICS_IPI) {
irq_set_chip_and_handler(virq, &xics_ipi_chip,
handle_percpu_irq);
return 0;
}
/* Let the ICS setup the chip data */
list_for_each_entry(ics, &ics_list, link)
if (ics->map(ics, virq) == 0)
return 0;
return -EINVAL;
}
static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
/* Current xics implementation translates everything
* to level. It is not technically right for MSIs but this
* is irrelevant at this point. We might get smarter in the future
*/
*out_hwirq = intspec[0];
*out_flags = IRQ_TYPE_LEVEL_LOW;
return 0;
}
static struct irq_domain_ops xics_host_ops = {
.match = xics_host_match,
.map = xics_host_map,
.xlate = xics_host_xlate,
};
static void __init xics_init_host(void)
{
xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
BUG_ON(xics_host == NULL);
irq_set_default_host(xics_host);
}
void __init xics_register_ics(struct ics *ics)
{
list_add(&ics->link, &ics_list);
}
static void __init xics_get_server_size(void)
{
struct device_node *np;
const u32 *isize;
/* We fetch the interrupt server size from the first ICS node
* we find if any
*/
np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
if (!np)
return;
isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
if (!isize)
return;
xics_interrupt_server_size = *isize;
of_node_put(np);
}
void __init xics_init(void)
{
int rc = -1;
/* First locate ICP */
if (firmware_has_feature(FW_FEATURE_LPAR))
rc = icp_hv_init();
if (rc < 0)
rc = icp_native_init();
if (rc < 0) {
pr_warning("XICS: Cannot find a Presentation Controller !\n");
return;
}
/* Copy get_irq callback over to ppc_md */
ppc_md.get_irq = icp_ops->get_irq;
/* Patch up IPI chip EOI */
xics_ipi_chip.irq_eoi = icp_ops->eoi;
/* Now locate ICS */
rc = ics_rtas_init();
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
pr_warning("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
xics_get_server_size();
xics_update_irq_servers();
xics_init_host();
xics_setup_cpu();
}
| gpl-2.0 |
isnehalkiran/kernel-msm | drivers/hwmon/max6650.c | 2301 | 19847 | /*
* max6650.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring.
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
*
* based on code written by John Morris <john.morris@spirentcom.com>
* Copyright (c) 2003 Spirent Communications
* and Claus Gindhart <claus.gindhart@kontron.com>
*
* This module has only been tested with the MAX6650 chip. It should
* also work with the MAX6651. It does not distinguish max6650 and max6651
* chips.
*
* The datasheet was last seen at:
*
* http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
/*
* Insmod parameters
*/
/* fan_voltage: 5=5V fan, 12=12V fan, 0=don't change */
static int fan_voltage;
/* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */
static int prescaler;
/* clock: The clock frequency of the chip the driver should assume */
static int clock = 254000;
module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
/*
* MAX 6650/6651 registers
*/
#define MAX6650_REG_SPEED 0x00
#define MAX6650_REG_CONFIG 0x02
#define MAX6650_REG_GPIO_DEF 0x04
#define MAX6650_REG_DAC 0x06
#define MAX6650_REG_ALARM_EN 0x08
#define MAX6650_REG_ALARM 0x0A
#define MAX6650_REG_TACH0 0x0C
#define MAX6650_REG_TACH1 0x0E
#define MAX6650_REG_TACH2 0x10
#define MAX6650_REG_TACH3 0x12
#define MAX6650_REG_GPIO_STAT 0x14
#define MAX6650_REG_COUNT 0x16
/*
* Config register bits
*/
#define MAX6650_CFG_V12 0x08
#define MAX6650_CFG_PRESCALER_MASK 0x07
#define MAX6650_CFG_PRESCALER_2 0x01
#define MAX6650_CFG_PRESCALER_4 0x02
#define MAX6650_CFG_PRESCALER_8 0x03
#define MAX6650_CFG_PRESCALER_16 0x04
#define MAX6650_CFG_MODE_MASK 0x30
#define MAX6650_CFG_MODE_ON 0x00
#define MAX6650_CFG_MODE_OFF 0x10
#define MAX6650_CFG_MODE_CLOSED_LOOP 0x20
#define MAX6650_CFG_MODE_OPEN_LOOP 0x30
#define MAX6650_COUNT_MASK 0x03
/*
* Alarm status register bits
*/
#define MAX6650_ALRM_MAX 0x01
#define MAX6650_ALRM_MIN 0x02
#define MAX6650_ALRM_TACH 0x04
#define MAX6650_ALRM_GPIO1 0x08
#define MAX6650_ALRM_GPIO2 0x10
/* Minimum and maximum values of the FAN-RPM */
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000
#define DIV_FROM_REG(reg) (1 << (reg & 7))
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
static struct max6650_data *max6650_update_device(struct device *dev);
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id max6650_id[] = {
{ "max6650", 1 },
{ "max6651", 4 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);
static struct i2c_driver max6650_driver = {
.driver = {
.name = "max6650",
},
.probe = max6650_probe,
.remove = max6650_remove,
.id_table = max6650_id,
};
/*
* Client data (each client gets its own)
*/
struct max6650_data {
struct device *hwmon_dev;
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
u8 speed;
u8 config;
u8 tach[4];
u8 count;
u8 dac;
u8 alarm;
};
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max6650_data *data = max6650_update_device(dev);
int rpm;
/*
* Calculation details:
*
* Each tachometer counts over an interval given by the "count"
* register (0.25, 0.5, 1 or 2 seconds). This module assumes
* that the fans produce two pulses per revolution (this seems
* to be the most common).
*/
rpm = ((data->tach[attr->index] * 120) / DIV_FROM_REG(data->count));
return sprintf(buf, "%d\n", rpm);
}
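/*
* Editor's note (worked example, not in the original source): with the
* count register selecting a 1 second interval (DIV_FROM_REG() == 4) and
* a tachometer count of 100, the fan made 50 revolutions in that second,
* and the formula reports 100 * 120 / 4 = 3000 RPM.
*/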
/*
* Set the fan speed to the specified RPM (or read back the RPM setting).
* This works in closed loop mode only. Use pwm1 for open loop speed setting.
*
* The MAX6650/1 will automatically control fan speed when in closed loop
* mode.
*
* Assumptions:
*
* 1) The MAX6650/1 internal 254kHz clock frequency is set correctly. Use
* the clock module parameter if you need to fine tune this.
*
* 2) The prescaler (low three bits of the config register) has already
* been set to an appropriate value. Use the prescaler module parameter
* if your BIOS doesn't initialize the chip properly.
*
* The relevant equations are given on pages 21 and 22 of the datasheet.
*
* From the datasheet, the relevant equation when in regulation is:
*
* [fCLK / (128 x (KTACH + 1))] = 2 x FanSpeed / KSCALE
*
* where:
*
* fCLK is the oscillator frequency (either the 254kHz internal
* oscillator or the externally applied clock)
*
* KTACH is the value in the speed register
*
* FanSpeed is the speed of the fan in rps
*
* KSCALE is the prescaler value (1, 2, 4, 8, or 16)
*
* When reading, we need to solve for FanSpeed. When writing, we need to
* solve for KTACH.
*
* Note: this tachometer is completely separate from the tachometers
* used to measure the fan speeds. Only one fan's speed (fan1) is
* controlled.
*/
static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
int kscale, ktach, rpm;
/*
* Use the datasheet equation:
*
* FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
*
* then multiply by 60 to give rpm.
*/
kscale = DIV_FROM_REG(data->config);
ktach = data->speed;
rpm = 60 * kscale * clock / (256 * (ktach + 1));
return sprintf(buf, "%d\n", rpm);
}
static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6650_data *data = i2c_get_clientdata(client);
int kscale, ktach;
unsigned long rpm;
int err;
err = kstrtoul(buf, 10, &rpm);
if (err)
return err;
rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
/*
* Divide the required speed by 60 to get from rpm to rps, then
* use the datasheet equation:
*
* KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
*/
mutex_lock(&data->update_lock);
kscale = DIV_FROM_REG(data->config);
ktach = ((clock * kscale) / (256 * rpm / 60)) - 1;
if (ktach < 0)
ktach = 0;
if (ktach > 255)
ktach = 255;
data->speed = ktach;
i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed);
mutex_unlock(&data->update_lock);
return count;
}
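/*
 * Worked example for the inverse calculation (editor's illustration,
 * same assumptions as above: clock = 254000, prescaler = 4): requesting
 * 3000 rpm gives KTACH = (254000 * 4) / (256 * 3000 / 60) - 1
 * = 1016000 / 12800 - 1 = 78. Reading fan1_target back with KTACH = 78
 * returns 60 * 4 * 254000 / (256 * 79) = 3014 rpm, illustrating the
 * rounding inherent in the integer arithmetic.
 */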
/*
* Get/set the fan speed in open loop mode using pwm1 sysfs file.
* Speed is given as a relative value from 0 to 255, where 255 is maximum
* speed. Note that this is done by writing directly to the chip's DAC,
* it won't change the closed loop speed set by fan1_target.
* Also note that due to rounding errors it is possible that you don't read
* back exactly the value you have set.
*/
static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int pwm;
struct max6650_data *data = max6650_update_device(dev);
/*
* Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans.
* Lower DAC values mean higher speeds.
*/
if (data->config & MAX6650_CFG_V12)
pwm = 255 - (255 * (int)data->dac)/180;
else
pwm = 255 - (255 * (int)data->dac)/76;
if (pwm < 0)
pwm = 0;
return sprintf(buf, "%d\n", pwm);
}
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6650_data *data = i2c_get_clientdata(client);
unsigned long pwm;
int err;
err = kstrtoul(buf, 10, &pwm);
if (err)
return err;
pwm = clamp_val(pwm, 0, 255);
mutex_lock(&data->update_lock);
if (data->config & MAX6650_CFG_V12)
data->dac = 180 - (180 * pwm)/255;
else
data->dac = 76 - (76 * pwm)/255;
i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
mutex_unlock(&data->update_lock);
return count;
}
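/*
 * Worked example of the pwm1/DAC mapping (editor's illustration): for a
 * 12V fan, writing pwm1 = 128 stores DAC = 180 - (180 * 128) / 255 = 90,
 * roughly the middle of the useful 0-180 range. Reading back gives
 * 255 - (255 * 90) / 180 = 128 in this case, but because both directions
 * truncate, other values can read back off by one, as noted above.
 */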
/*
* Get/Set controller mode:
* Possible values:
* 0 = Fan always on
* 1 = Open loop: voltage is set according to speed, not regulated.
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
*/
static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
int sysfs_modes[4] = {0, 1, 2, 1};
return sprintf(buf, "%d\n", sysfs_modes[mode]);
}
static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6650_data *data = i2c_get_clientdata(client);
int max6650_modes[3] = {0, 3, 2};
unsigned long mode;
int err;
err = kstrtoul(buf, 10, &mode);
if (err)
return err;
if (mode > 2)
return -EINVAL;
mutex_lock(&data->update_lock);
data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
data->config = (data->config & ~MAX6650_CFG_MODE_MASK)
| (max6650_modes[mode] << 4);
i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config);
mutex_unlock(&data->update_lock);
return count;
}
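/*
 * Editor's note on the two translation tables above (the chip mode
 * encoding is assumed from the MAX6650_CFG_MODE_* definitions earlier in
 * this file, which are not reproduced here): config bits 5:4 encode
 * 0 = full on, 1 = full off, 2 = closed loop, 3 = open loop.
 * sysfs_modes[] folds both "full off" and "open loop" onto
 * pwm1_enable = 1, while max6650_modes[] maps pwm1_enable 0/1/2 to chip
 * modes 0/3/2 and never writes "full off".
 */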
/*
* Read/write functions for fan1_div sysfs file. The MAX6650 has no such
* divider. We handle this by converting between divider and counttime:
*
* (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3
*
* Lower values of k allow connecting a faster fan without the risk of
* counter overflow. The price is lower resolution. You can also set counttime
* using the module parameter. Note that the module parameter "prescaler" also
* influences the behaviour. Unfortunately, there's no sysfs attribute
* defined for that. See the data sheet for details.
*/
static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct max6650_data *data = max6650_update_device(dev);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
}
static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct max6650_data *data = i2c_get_clientdata(client);
unsigned long div;
int err;
err = kstrtoul(buf, 10, &div);
if (err)
return err;
mutex_lock(&data->update_lock);
switch (div) {
case 1:
data->count = 0;
break;
case 2:
data->count = 1;
break;
case 4:
data->count = 2;
break;
case 8:
data->count = 3;
break;
default:
mutex_unlock(&data->update_lock);
return -EINVAL;
}
i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count);
mutex_unlock(&data->update_lock);
return count;
}
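/*
 * Worked example (editor's illustration): a fan at 10000 rpm produces
 * about 333 tach pulses per second. With fan1_div = 4 (1 second count
 * time) the 8-bit tach register would overflow, while fan1_div = 1
 * (0.25 seconds) yields a count of about 83 at a coarser resolution of
 * 120 rpm per count instead of 30.
 */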
/*
* Get alarm stati:
* Possible values:
* 0 = no alarm
* 1 = alarm
*/
static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct max6650_data *data = max6650_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
int alarm = 0;
if (data->alarm & attr->index) {
mutex_lock(&data->update_lock);
alarm = 1;
data->alarm &= ~attr->index;
data->alarm |= i2c_smbus_read_byte_data(client,
MAX6650_REG_ALARM);
mutex_unlock(&data->update_lock);
}
return sprintf(buf, "%d\n", alarm);
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_MAX);
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_MIN);
static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_TACH);
static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO1);
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO2);
static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct i2c_client *client = to_i2c_client(dev);
u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
struct device_attribute *devattr;
/*
* Hide the alarms that have not been enabled by the firmware
*/
devattr = container_of(a, struct device_attribute, attr);
if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
|| devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
|| devattr == &sensor_dev_attr_fan1_fault.dev_attr
|| devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
|| devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
return 0;
}
return a->mode;
}
static struct attribute *max6650_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&dev_attr_fan1_target.attr,
&dev_attr_fan1_div.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm1.attr,
&sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_fault.dev_attr.attr,
&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
NULL
};
static struct attribute_group max6650_attr_grp = {
.attrs = max6650_attrs,
.is_visible = max6650_attrs_visible,
};
static struct attribute *max6651_attrs[] = {
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
NULL
};
static const struct attribute_group max6651_attr_grp = {
.attrs = max6651_attrs,
};
/*
* Real code
*/
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max6650_data *data;
int err;
data = devm_kzalloc(&client->dev, sizeof(struct max6650_data),
GFP_KERNEL);
if (!data) {
dev_err(&client->dev, "out of memory.\n");
return -ENOMEM;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->nr_fans = id->driver_data;
/*
* Initialize the max6650 chip
*/
err = max6650_init_client(client);
if (err)
return err;
err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
if (err)
return err;
/* 3 additional fan inputs for the MAX6651 */
if (data->nr_fans == 4) {
err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
if (err)
goto err_remove;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (!IS_ERR(data->hwmon_dev))
return 0;
err = PTR_ERR(data->hwmon_dev);
dev_err(&client->dev, "error registering hwmon device.\n");
if (data->nr_fans == 4)
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
err_remove:
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
return err;
}
static int max6650_remove(struct i2c_client *client)
{
struct max6650_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
if (data->nr_fans == 4)
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
return 0;
}
static int max6650_init_client(struct i2c_client *client)
{
struct max6650_data *data = i2c_get_clientdata(client);
int config;
int err = -EIO;
config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
if (config < 0) {
dev_err(&client->dev, "Error reading config, aborting.\n");
return err;
}
switch (fan_voltage) {
case 0:
break;
case 5:
config &= ~MAX6650_CFG_V12;
break;
case 12:
config |= MAX6650_CFG_V12;
break;
default:
dev_err(&client->dev, "illegal value for fan_voltage (%d)\n",
fan_voltage);
}
dev_info(&client->dev, "Fan voltage is set to %dV.\n",
(config & MAX6650_CFG_V12) ? 12 : 5);
switch (prescaler) {
case 0:
break;
case 1:
config &= ~MAX6650_CFG_PRESCALER_MASK;
break;
case 2:
config = (config & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_2;
break;
case 4:
config = (config & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_4;
break;
case 8:
config = (config & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_8;
break;
case 16:
config = (config & ~MAX6650_CFG_PRESCALER_MASK)
| MAX6650_CFG_PRESCALER_16;
break;
default:
dev_err(&client->dev, "illegal value for prescaler (%d)\n",
prescaler);
}
dev_info(&client->dev, "Prescaler is set to %d.\n",
1 << (config & MAX6650_CFG_PRESCALER_MASK));
/*
* If mode is set to "full off", we change it to "open loop" and
* set DAC to 255, which has the same effect. We do this because
* there's no "full off" mode defined in hwmon specifications.
*/
if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
config = (config & ~MAX6650_CFG_MODE_MASK)
| MAX6650_CFG_MODE_OPEN_LOOP;
if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
dev_err(&client->dev, "DAC write error, aborting.\n");
return err;
}
}
if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
dev_err(&client->dev, "Config write error, aborting.\n");
return err;
}
data->config = config;
data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);
return 0;
}
static const u8 tach_reg[] = {
MAX6650_REG_TACH0,
MAX6650_REG_TACH1,
MAX6650_REG_TACH2,
MAX6650_REG_TACH3,
};
static struct max6650_data *max6650_update_device(struct device *dev)
{
int i;
struct i2c_client *client = to_i2c_client(dev);
struct max6650_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
data->speed = i2c_smbus_read_byte_data(client,
MAX6650_REG_SPEED);
data->config = i2c_smbus_read_byte_data(client,
MAX6650_REG_CONFIG);
for (i = 0; i < data->nr_fans; i++) {
data->tach[i] = i2c_smbus_read_byte_data(client,
tach_reg[i]);
}
data->count = i2c_smbus_read_byte_data(client,
MAX6650_REG_COUNT);
data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);
/*
* Alarms are cleared on read in case the condition that
* caused the alarm is removed. Keep the value latched here
* for providing the register through different alarm files.
*/
data->alarm |= i2c_smbus_read_byte_data(client,
MAX6650_REG_ALARM);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
module_i2c_driver(max6650_driver);
MODULE_AUTHOR("Hans J. Koch");
MODULE_DESCRIPTION("MAX6650 sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
chadouming/canuck-3.10 | drivers/scsi/t128.c | 2557 | 11810 | #define AUTOSENSE
#define PSEUDO_DMA
/*
* Trantor T128/T128F/T228 driver
* Note : architecturally, the T100 and T130 are different and won't
* work
*
* Copyright 1993, Drew Eckhardt
* Visionary Computing
* (Unix and Linux consulting and custom programming)
* drew@colorado.edu
* +1 (303) 440-4894
*
* DISTRIBUTION RELEASE 3.
*
* For more information, please consult
*
* Trantor Systems, Ltd.
* T128/T128F/T228 SCSI Host Adapter
* Hardware Specifications
*
* Trantor Systems, Ltd.
* 5415 Randall Place
* Fremont, CA 94538
* 1+ (415) 770-1400, FAX 1+ (415) 770-9910
*
* and
*
* NCR 5380 Family
* SCSI Protocol Controller
* Databook
*
* NCR Microelectronics
* 1635 Aeroplaza Drive
* Colorado Springs, CO 80916
* 1+ (719) 578-3400
* 1+ (800) 334-5454
*/
/*
* Options :
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
* PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance
* increase compared to polled I/O.
*
* PARITY - enable parity checking. Not supported.
*
* SCSI2 - enable support for SCSI-II tagged queueing. Untested.
*
*
* UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
* only really want to use this if you're having a problem with
* dropped characters during high speed communications, and even
* then, you're going to be better off twiddling with transfersize.
*
* USLEEP - enable support for devices that don't disconnect. Untested.
*
* The card is detected and initialized in one of several ways :
* 1. Autoprobe (default) - since the board is memory mapped,
* a BIOS signature is scanned for to locate the registers.
* An interrupt is triggered to autoprobe for the interrupt
* line.
*
* 2. With command line overrides - t128=address,irq may be
* used on the LILO command line to override the defaults.
*
* 3. With the T128_OVERRIDE compile time define. This is
* specified as an array of address, irq tuples. Ie, for
* one board at the default 0xcc000 address, IRQ5, I could say
* -DT128_OVERRIDE={{0xcc000, 5}}
*
* Note that if the override methods are used, place holders must
* be specified for other boards in the system.
*
* T128/T128F jumper/dipswitch settings (note : on my sample, the switches
* were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
*
* T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
* T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
* cc000 off off
* c8000 off on
* dc000 on off
* d8000 on on
*
*
* Interrupts
* There is a 12 pin jumper block, jp1, numbered as follows :
* T128 (JP1) T128F (J5)
* 2 4 6 8 10 12 11 9 7 5 3 1
* 1 3 5 7 9 11 12 10 8 6 4 2
*
* 3 2-4
* 5 1-3
* 7 3-5
* T128F only
* 10 8-10
* 12 7-9
* 14 10-12
* 15 9-11
*/
/*
* $Log: t128.c,v $
*/
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "t128.h"
#define AUTOPROBE_IRQ
#include "NCR5380.h"
static struct override {
unsigned long address;
int irq;
} overrides
#ifdef T128_OVERRIDE
[] __initdata = T128_OVERRIDE;
#else
[4] __initdata = {{0, IRQ_AUTO}, {0, IRQ_AUTO},
{0, IRQ_AUTO}, {0, IRQ_AUTO}};
#endif
#define NO_OVERRIDES ARRAY_SIZE(overrides)
static struct base {
unsigned int address;
int noauto;
} bases[] __initdata = {
{ 0xcc000, 0}, { 0xc8000, 0}, { 0xdc000, 0}, { 0xd8000, 0}
};
#define NO_BASES ARRAY_SIZE(bases)
static struct signature {
const char *string;
int offset;
} signatures[] __initdata = {
{"TSROM: SCSI BIOS, Version 1.12", 0x36},
};
#define NO_SIGNATURES ARRAY_SIZE(signatures)
/*
* Function : t128_setup(char *str, int *ints)
*
* Purpose : LILO command line initialization of the overrides array,
*
* Inputs : str - unused, ints - array of integer parameters with ints[0]
* equal to the number of ints.
*
*/
void __init t128_setup(char *str, int *ints){
static int commandline_current = 0;
int i;
if (ints[0] != 2)
printk("t128_setup : usage t128=address,irq\n");
else
if (commandline_current < NO_OVERRIDES) {
overrides[commandline_current].address = ints[1];
overrides[commandline_current].irq = ints[2];
for (i = 0; i < NO_BASES; ++i)
if (bases[i].address == ints[1]) {
bases[i].noauto = 1;
break;
}
++commandline_current;
}
}
/*
* Function : int t128_detect(struct scsi_host_template * tpnt)
*
* Purpose : detects and initializes T128,T128F, or T228 controllers
* that were autoprobed, overridden on the LILO command line,
* or specified at compile time.
*
* Inputs : tpnt - template for this SCSI adapter.
*
* Returns : 1 if a host adapter was found, 0 if not.
*
*/
int __init t128_detect(struct scsi_host_template * tpnt){
static int current_override = 0, current_base = 0;
struct Scsi_Host *instance;
unsigned long base;
void __iomem *p;
int sig, count;
tpnt->proc_name = "t128";
tpnt->show_info = t128_show_info;
tpnt->write_info = t128_write_info;
for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
base = 0;
p = NULL;
if (overrides[current_override].address) {
base = overrides[current_override].address;
p = ioremap(bases[current_base].address, 0x2000);
if (!p)
base = 0;
} else
for (; !base && (current_base < NO_BASES); ++current_base) {
#if (TDEBUG & TDEBUG_INIT)
printk("scsi-t128 : probing address %08x\n", bases[current_base].address);
#endif
if (bases[current_base].noauto)
continue;
p = ioremap(bases[current_base].address, 0x2000);
if (!p)
continue;
for (sig = 0; sig < NO_SIGNATURES; ++sig)
if (check_signature(p + signatures[sig].offset,
signatures[sig].string,
strlen(signatures[sig].string))) {
base = bases[current_base].address;
#if (TDEBUG & TDEBUG_INIT)
printk("scsi-t128 : detected board.\n");
#endif
goto found;
}
iounmap(p);
}
#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
printk("scsi-t128 : base = %08x\n", (unsigned int) base);
#endif
if (!base)
break;
found:
instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
if(instance == NULL)
break;
instance->base = base;
((struct NCR5380_hostdata *)instance->hostdata)->base = p;
NCR5380_init(instance, 0);
if (overrides[current_override].irq != IRQ_AUTO)
instance->irq = overrides[current_override].irq;
else
instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
if (instance->irq != SCSI_IRQ_NONE)
if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128",
instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
}
if (instance->irq == SCSI_IRQ_NONE) {
printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
}
#if defined(TDEBUG) && (TDEBUG & TDEBUG_INIT)
printk("scsi%d : irq = %d\n", instance->host_no, instance->irq);
#endif
printk("scsi%d : at 0x%08lx", instance->host_no, instance->base);
if (instance->irq == SCSI_IRQ_NONE)
printk (" interrupts disabled");
else
printk (" irq %d", instance->irq);
printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE);
NCR5380_print_options(instance);
printk("\n");
++current_override;
++count;
}
return count;
}
static int t128_release(struct Scsi_Host *shost)
{
NCR5380_local_declare();
NCR5380_setup(shost);
if (shost->irq)
free_irq(shost->irq, shost);
NCR5380_exit(shost);
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
scsi_unregister(shost);
iounmap(base);
return 0;
}
/*
* Function : int t128_biosparam(Disk * disk, struct block_device *dev, int *ip)
*
* Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
* the specified device / size.
*
* Inputs : size = size of device in sectors (512 bytes), dev = block device
* major / minor, ip[] = {heads, sectors, cylinders}
*
* Returns : always 0 (success), initializes ip
*
*/
/*
* XXX Most SCSI boards use this mapping, I could be incorrect. Someone
* using hard disks on a trantor should verify that this mapping corresponds
* to that used by the BIOS / ASPI driver by running the linux fdisk program
* and matching the H_C_S coordinates to what DOS uses.
*/
int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int * ip)
{
ip[0] = 64;
ip[1] = 32;
ip[2] = capacity >> 11;
return 0;
}
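/*
 * Editor's worked example: with 64 heads and 32 sectors per track each
 * cylinder spans 64 * 32 = 2048 sectors, so the cylinder count is simply
 * capacity >> 11. A 1 GiB disk (2097152 sectors of 512 bytes) therefore
 * reports a 64/32/1024 heads/sectors/cylinders geometry.
 */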
/*
* Function : int NCR5380_pread (struct Scsi_Host *instance,
* unsigned char *dst, int len)
*
* Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
* dst
*
* Inputs : dst = destination, len = length in bytes
*
* Returns : 0 on success, non zero on a failure such as a watchdog
* timeout.
*/
static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
int len) {
NCR5380_local_declare();
void __iomem *reg;
unsigned char *d = dst;
register int i = len;
NCR5380_setup(instance);
reg = base + T_DATA_REG_OFFSET;
#if 0
for (; i; --i) {
while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
#else
while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
for (; i; --i) {
#endif
*d++ = readb(reg);
}
if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
unsigned char tmp;
void __iomem *foo = base + T_CONTROL_REG_OFFSET;
tmp = readb(foo);
writeb(tmp | T_CR_CT, foo);
writeb(tmp, foo);
printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
instance->host_no);
return -1;
} else
return 0;
}
/*
* Function : int NCR5380_pwrite (struct Scsi_Host *instance,
* unsigned char *src, int len)
*
* Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
* src
*
* Inputs : src = source, len = length in bytes
*
* Returns : 0 on success, non zero on a failure such as a watchdog
* timeout.
*/
static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
int len) {
NCR5380_local_declare();
void __iomem *reg;
unsigned char *s = src;
register int i = len;
NCR5380_setup(instance);
reg = base + T_DATA_REG_OFFSET;
#if 0
for (; i; --i) {
while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
#else
while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
for (; i; --i) {
#endif
writeb(*s++, reg);
}
if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
unsigned char tmp;
void __iomem *foo = base + T_CONTROL_REG_OFFSET;
tmp = readb(foo);
writeb(tmp | T_CR_CT, foo);
writeb(tmp, foo);
printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
instance->host_no);
return -1;
} else
return 0;
}
MODULE_LICENSE("GPL");
#include "NCR5380.c"
static struct scsi_host_template driver_template = {
.name = "Trantor T128/T128F/T228",
.detect = t128_detect,
.release = t128_release,
.queuecommand = t128_queue_command,
.eh_abort_handler = t128_abort,
.eh_bus_reset_handler = t128_bus_reset,
.bios_param = t128_biosparam,
.can_queue = CAN_QUEUE,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
| gpl-2.0 |
fweisbec/tracing | fs/jbd/commit.c | 3325 | 28750 | /*
* linux/fs/jbd/commit.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
* Copyright 1998 Red Hat corp --- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Journal commit routines for the generic filesystem journaling code;
* part of the ext2fs journaling system.
*/
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <trace/events/jbd.h>
/*
* Default IO end handler for temporary BJ_IO buffer_heads.
*/
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
BUFFER_TRACE(bh, "");
if (uptodate)
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
unlock_buffer(bh);
}
/*
* When an ext3-ordered file is truncated, it is possible that many pages are
* not successfully freed, because they are attached to a committing transaction.
* After the transaction commits, these pages are left on the LRU, with no
* ->mapping, and with attached buffers. These pages are trivially reclaimable
* by the VM, but their apparent absence upsets the VM accounting, and it makes
* the numbers in /proc/meminfo look odd.
*
* So here, we have a buffer which has just come off the forget list. Look to
* see if we can strip all buffers from the backing page.
*
* Called under journal->j_list_lock. The caller provided us with a ref
* against the buffer, and we drop that here.
*/
static void release_buffer_page(struct buffer_head *bh)
{
struct page *page;
if (buffer_dirty(bh))
goto nope;
if (atomic_read(&bh->b_count) != 1)
goto nope;
page = bh->b_page;
if (!page)
goto nope;
if (page->mapping)
goto nope;
/* OK, it's a truncated page */
if (!trylock_page(page))
goto nope;
page_cache_get(page);
__brelse(bh);
try_to_free_buffers(page);
unlock_page(page);
page_cache_release(page);
return;
nope:
__brelse(bh);
}
/*
* Decrement reference counter for data buffer. If it has been marked
* 'BH_Freed', release it and the page to which it belongs if possible.
*/
static void release_data_buffer(struct buffer_head *bh)
{
if (buffer_freed(bh)) {
clear_buffer_freed(bh);
release_buffer_page(bh);
} else
put_bh(bh);
}
/*
* Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
* held. For ranking reasons we must trylock. If we lose, schedule away and
* return 0. j_list_lock is dropped in this case.
*/
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
if (!jbd_trylock_bh_state(bh)) {
spin_unlock(&journal->j_list_lock);
schedule();
return 0;
}
return 1;
}
/* Done it all: now write the commit record. We should have
* cleaned up our previous buffers by now, so if we are in abort
* mode we can now just skip the rest of the journal write
* entirely.
*
* Returns 1 if the journal needs to be aborted or 0 on success
*/
static int journal_write_commit_record(journal_t *journal,
transaction_t *commit_transaction)
{
struct journal_head *descriptor;
struct buffer_head *bh;
journal_header_t *header;
int ret;
if (is_journal_aborted(journal))
return 0;
descriptor = journal_get_descriptor_buffer(journal);
if (!descriptor)
return 1;
bh = jh2bh(descriptor);
header = (journal_header_t *)(bh->b_data);
header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
JBUFFER_TRACE(descriptor, "write commit block");
set_buffer_dirty(bh);
if (journal->j_flags & JFS_BARRIER)
ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
else
ret = sync_dirty_buffer(bh);
put_bh(bh); /* One for getblk() */
journal_put_journal_head(descriptor);
return (ret == -EIO);
}
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
int write_op)
{
int i;
for (i = 0; i < bufs; i++) {
wbuf[i]->b_end_io = end_buffer_write_sync;
/* We use-up our safety reference in submit_bh() */
submit_bh(write_op, wbuf[i]);
}
}
/*
* Submit all the data buffers to disk
*/
static int journal_submit_data_buffers(journal_t *journal,
transaction_t *commit_transaction,
int write_op)
{
struct journal_head *jh;
struct buffer_head *bh;
int locked;
int bufs = 0;
struct buffer_head **wbuf = journal->j_wbuf;
int err = 0;
/*
* Whenever we unlock the journal and sleep, things can get added
* onto ->t_sync_datalist, so we have to keep looping back to
* write_out_data until we *know* that the list is empty.
*
* Cleanup any flushed data buffers from the data list. Even in
* abort mode, we want to flush this out as soon as possible.
*/
write_out_data:
cond_resched();
spin_lock(&journal->j_list_lock);
while (commit_transaction->t_sync_datalist) {
jh = commit_transaction->t_sync_datalist;
bh = jh2bh(jh);
locked = 0;
/* Get reference just to make sure buffer does not disappear
* when we are forced to drop various locks */
get_bh(bh);
/* If the buffer is dirty, we need to submit IO and hence
* we need the buffer lock. We try to lock the buffer without
* blocking. If we fail, we need to drop j_list_lock and do
* blocking lock_buffer().
*/
if (buffer_dirty(bh)) {
if (!trylock_buffer(bh)) {
BUFFER_TRACE(bh, "needs blocking lock");
spin_unlock(&journal->j_list_lock);
trace_jbd_do_submit_data(journal,
commit_transaction);
/* Write out all data to prevent deadlocks */
journal_do_submit_data(wbuf, bufs, write_op);
bufs = 0;
lock_buffer(bh);
spin_lock(&journal->j_list_lock);
}
locked = 1;
}
/* We have to get bh_state lock. Again out of order, sigh. */
if (!inverted_lock(journal, bh)) {
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
}
/* Someone already cleaned up the buffer? */
if (!buffer_jbd(bh) || bh2jh(bh) != jh
|| jh->b_transaction != commit_transaction
|| jh->b_jlist != BJ_SyncData) {
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
BUFFER_TRACE(bh, "already cleaned up");
release_data_buffer(bh);
continue;
}
if (locked && test_clear_buffer_dirty(bh)) {
BUFFER_TRACE(bh, "needs writeout, adding to array");
wbuf[bufs++] = bh;
__journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
if (bufs == journal->j_wbufsize) {
spin_unlock(&journal->j_list_lock);
trace_jbd_do_submit_data(journal,
commit_transaction);
journal_do_submit_data(wbuf, bufs, write_op);
bufs = 0;
goto write_out_data;
}
} else if (!locked && buffer_locked(bh)) {
__journal_file_buffer(jh, commit_transaction,
BJ_Locked);
jbd_unlock_bh_state(bh);
put_bh(bh);
} else {
BUFFER_TRACE(bh, "writeout complete: unfile");
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
__journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
if (locked)
unlock_buffer(bh);
release_data_buffer(bh);
}
if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
spin_unlock(&journal->j_list_lock);
goto write_out_data;
}
}
spin_unlock(&journal->j_list_lock);
trace_jbd_do_submit_data(journal, commit_transaction);
journal_do_submit_data(wbuf, bufs, write_op);
return err;
}
/*
* journal_commit_transaction
*
* The primary function for committing a transaction to the log. This
* function is called by the journal thread to begin a complete commit.
*/
void journal_commit_transaction(journal_t *journal)
{
transaction_t *commit_transaction;
struct journal_head *jh, *new_jh, *descriptor;
struct buffer_head **wbuf = journal->j_wbuf;
int bufs;
int flags;
int err;
unsigned int blocknr;
ktime_t start_time;
u64 commit_time;
char *tagp = NULL;
journal_header_t *header;
journal_block_tag_t *tag = NULL;
int space_left = 0;
int first_tag = 0;
int tag_flag;
int i;
struct blk_plug plug;
/*
* First job: lock down the current transaction and wait for
* all outstanding updates to complete.
*/
/* Do we need to erase the effects of a prior journal_flush? */
if (journal->j_flags & JFS_FLUSHED) {
jbd_debug(3, "super block updated\n");
journal_update_superblock(journal, 1);
} else {
jbd_debug(3, "superblock not updated\n");
}
J_ASSERT(journal->j_running_transaction != NULL);
J_ASSERT(journal->j_committing_transaction == NULL);
commit_transaction = journal->j_running_transaction;
J_ASSERT(commit_transaction->t_state == T_RUNNING);
trace_jbd_start_commit(journal, commit_transaction);
jbd_debug(1, "JBD: starting commit of transaction %d\n",
commit_transaction->t_tid);
spin_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
trace_jbd_commit_locking(journal, commit_transaction);
spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) {
DEFINE_WAIT(wait);
prepare_to_wait(&journal->j_wait_updates, &wait,
TASK_UNINTERRUPTIBLE);
if (commit_transaction->t_updates) {
spin_unlock(&commit_transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
schedule();
spin_lock(&journal->j_state_lock);
spin_lock(&commit_transaction->t_handle_lock);
}
finish_wait(&journal->j_wait_updates, &wait);
}
spin_unlock(&commit_transaction->t_handle_lock);
J_ASSERT (commit_transaction->t_outstanding_credits <=
journal->j_max_transaction_buffers);
/*
* First thing we are allowed to do is to discard any remaining
* BJ_Reserved buffers. Note, it is _not_ permissible to assume
* that there are no such buffers: if a large filesystem
* operation like a truncate needs to split itself over multiple
* transactions, then it may try to do a journal_restart() while
* there are still BJ_Reserved buffers outstanding. These must
* be released cleanly from the current transaction.
*
* In this case, the filesystem must still reserve write access
* again before modifying the buffer in the new transaction, but
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple journal_get_write_access() calls to the same
* buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
JBUFFER_TRACE(jh, "reserved, unused: refile");
/*
* A journal_get_undo_access()+journal_release_buffer() may
* leave undo-committed data.
*/
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
jbd_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
journal_refile_buffer(journal, jh);
}
/*
* Now try to drop any written-back buffers from the journal's
* checkpoint lists. We do this *before* commit because it potentially
* frees some memory
*/
spin_lock(&journal->j_list_lock);
__journal_clean_checkpoint_list(journal);
spin_unlock(&journal->j_list_lock);
jbd_debug (3, "JBD: commit phase 1\n");
/*
* Clear revoked flag to reflect there is no revoked buffers
* in the next transaction which is going to be started.
*/
journal_clear_buffer_revoked_flags(journal);
/*
* Switch to a new revoke table.
*/
journal_switch_revoke_table(journal);
trace_jbd_commit_flushing(journal, commit_transaction);
commit_transaction->t_state = T_FLUSH;
journal->j_committing_transaction = commit_transaction;
journal->j_running_transaction = NULL;
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
wake_up(&journal->j_wait_transaction_locked);
spin_unlock(&journal->j_state_lock);
jbd_debug (3, "JBD: commit phase 2\n");
/*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
blk_start_plug(&plug);
err = journal_submit_data_buffers(journal, commit_transaction,
WRITE_SYNC);
blk_finish_plug(&plug);
/*
* Wait for all previously submitted IO to complete.
*/
spin_lock(&journal->j_list_lock);
while (commit_transaction->t_locked_list) {
struct buffer_head *bh;
jh = commit_transaction->t_locked_list->b_tprev;
bh = jh2bh(jh);
get_bh(bh);
if (buffer_locked(bh)) {
spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
spin_lock(&journal->j_list_lock);
}
if (unlikely(!buffer_uptodate(bh))) {
if (!trylock_page(bh->b_page)) {
spin_unlock(&journal->j_list_lock);
lock_page(bh->b_page);
spin_lock(&journal->j_list_lock);
}
if (bh->b_page->mapping)
set_bit(AS_EIO, &bh->b_page->mapping->flags);
unlock_page(bh->b_page);
SetPageError(bh->b_page);
err = -EIO;
}
if (!inverted_lock(journal, bh)) {
put_bh(bh);
spin_lock(&journal->j_list_lock);
continue;
}
if (buffer_jbd(bh) && bh2jh(bh) == jh &&
jh->b_transaction == commit_transaction &&
jh->b_jlist == BJ_Locked)
__journal_unfile_buffer(jh);
jbd_unlock_bh_state(bh);
release_data_buffer(bh);
cond_resched_lock(&journal->j_list_lock);
}
spin_unlock(&journal->j_list_lock);
if (err) {
char b[BDEVNAME_SIZE];
printk(KERN_WARNING
"JBD: Detected IO errors while flushing file data "
"on %s\n", bdevname(journal->j_fs_dev, b));
if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
journal_abort(journal, err);
err = 0;
}
blk_start_plug(&plug);
journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
/*
* If we found any dirty or locked buffers, then we should have
* looped back up to the write_out_data label. If there weren't
* any then journal_clean_data_list should have wiped the list
* clean by now, so check that it is in fact empty.
*/
J_ASSERT (commit_transaction->t_sync_datalist == NULL);
jbd_debug (3, "JBD: commit phase 3\n");
/*
* Way to go: we have now written out all of the data for a
* transaction! Now comes the tricky part: we need to write out
* metadata. Loop over the transaction's entire buffer list:
*/
spin_lock(&journal->j_state_lock);
commit_transaction->t_state = T_COMMIT;
spin_unlock(&journal->j_state_lock);
trace_jbd_commit_logging(journal, commit_transaction);
J_ASSERT(commit_transaction->t_nr_buffers <=
commit_transaction->t_outstanding_credits);
descriptor = NULL;
bufs = 0;
while (commit_transaction->t_buffers) {
/* Find the next buffer to be journaled... */
jh = commit_transaction->t_buffers;
/* If we're in abort mode, we just un-journal the buffer and
release it. */
if (is_journal_aborted(journal)) {
clear_buffer_jbddirty(jh2bh(jh));
JBUFFER_TRACE(jh, "journal is aborting: refile");
journal_refile_buffer(journal, jh);
/* If that was the last one, we need to clean up
* any descriptor buffers which may have been
* already allocated, even if we are now
* aborting. */
if (!commit_transaction->t_buffers)
goto start_journal_io;
continue;
}
/* Make sure we have a descriptor block in which to
record the metadata buffer. */
if (!descriptor) {
struct buffer_head *bh;
J_ASSERT (bufs == 0);
jbd_debug(4, "JBD: get descriptor\n");
descriptor = journal_get_descriptor_buffer(journal);
if (!descriptor) {
journal_abort(journal, -EIO);
continue;
}
bh = jh2bh(descriptor);
jbd_debug(4, "JBD: got buffer %llu (%p)\n",
(unsigned long long)bh->b_blocknr, bh->b_data);
header = (journal_header_t *)&bh->b_data[0];
header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
tagp = &bh->b_data[sizeof(journal_header_t)];
space_left = bh->b_size - sizeof(journal_header_t);
first_tag = 1;
set_buffer_jwrite(bh);
set_buffer_dirty(bh);
wbuf[bufs++] = bh;
/* Record it so that we can wait for IO
completion later */
BUFFER_TRACE(bh, "ph3: file as descriptor");
journal_file_buffer(descriptor, commit_transaction,
BJ_LogCtl);
}
/* Where is the buffer to be written? */
err = journal_next_log_block(journal, &blocknr);
/* If the block mapping failed, just abandon the buffer
and repeat this loop: we'll fall into the
refile-on-abort condition above. */
if (err) {
journal_abort(journal, err);
continue;
}
/*
* start_this_handle() uses t_outstanding_credits to determine
* the free space in the log, but this counter is changed
* by journal_next_log_block() also.
*/
commit_transaction->t_outstanding_credits--;
/* Bump b_count to prevent truncate from stumbling over
the shadowed buffer! @@@ This can go if we ever get
rid of the BJ_IO/BJ_Shadow pairing of buffers. */
get_bh(jh2bh(jh));
/* Make a temporary IO buffer with which to write it out
(this will requeue both the metadata buffer and the
temporary IO buffer). new_bh goes on BJ_IO*/
set_buffer_jwrite(jh2bh(jh));
/*
* akpm: journal_write_metadata_buffer() sets
* new_bh->b_transaction to commit_transaction.
* We need to clean this up before we release new_bh
* (which is of type BJ_IO)
*/
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = journal_write_metadata_buffer(commit_transaction,
jh, &new_jh, blocknr);
set_buffer_jwrite(jh2bh(new_jh));
wbuf[bufs++] = jh2bh(new_jh);
/* Record the new block's tag in the current descriptor
buffer */
tag_flag = 0;
if (flags & 1)
tag_flag |= JFS_FLAG_ESCAPE;
if (!first_tag)
tag_flag |= JFS_FLAG_SAME_UUID;
tag = (journal_block_tag_t *) tagp;
tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
tag->t_flags = cpu_to_be32(tag_flag);
tagp += sizeof(journal_block_tag_t);
space_left -= sizeof(journal_block_tag_t);
if (first_tag) {
memcpy (tagp, journal->j_uuid, 16);
tagp += 16;
space_left -= 16;
first_tag = 0;
}
/* If there's no more to do, or if the descriptor is full,
let the IO rip! */
if (bufs == journal->j_wbufsize ||
commit_transaction->t_buffers == NULL ||
space_left < sizeof(journal_block_tag_t) + 16) {
jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
/* Write an end-of-descriptor marker before
submitting the IOs. "tag" still points to
the last tag we set up. */
tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
start_journal_io:
for (i = 0; i < bufs; i++) {
struct buffer_head *bh = wbuf[i];
lock_buffer(bh);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
submit_bh(WRITE_SYNC, bh);
}
cond_resched();
/* Force a new descriptor to be generated next
time round the loop. */
descriptor = NULL;
bufs = 0;
}
}
blk_finish_plug(&plug);
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
transaction's t_log_list queue, and metadata buffers are on
the t_iobuf_list queue.
Wait for the buffers in reverse order. That way we are
less likely to be woken up until all IOs have completed, and
so we incur less scheduling load.
*/
jbd_debug(3, "JBD: commit phase 4\n");
/*
* akpm: these are BJ_IO, and j_list_lock is not needed.
* See __journal_try_to_free_buffer.
*/
wait_for_iobuf:
while (commit_transaction->t_iobuf_list != NULL) {
struct buffer_head *bh;
jh = commit_transaction->t_iobuf_list->b_tprev;
bh = jh2bh(jh);
if (buffer_locked(bh)) {
wait_on_buffer(bh);
goto wait_for_iobuf;
}
if (cond_resched())
goto wait_for_iobuf;
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
clear_buffer_jwrite(bh);
JBUFFER_TRACE(jh, "ph4: unfile after journal write");
journal_unfile_buffer(journal, jh);
/*
* ->t_iobuf_list should contain only dummy buffer_heads
* which were created by journal_write_metadata_buffer().
*/
BUFFER_TRACE(bh, "dumping temporary bh");
journal_put_journal_head(jh);
__brelse(bh);
J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
free_buffer_head(bh);
/* We also have to unlock and free the corresponding
shadowed buffer */
jh = commit_transaction->t_shadow_list->b_tprev;
bh = jh2bh(jh);
clear_buffer_jwrite(bh);
J_ASSERT_BH(bh, buffer_jbddirty(bh));
/* The metadata is now released for reuse, but we need
to remember it against this transaction so that when
we finally commit, we can do any checkpointing
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
journal_file_buffer(jh, commit_transaction, BJ_Forget);
/*
* Wake up any transactions which were waiting for this
* IO to complete. The barrier must be here so that changes
* by journal_file_buffer() take effect before wake_up_bit()
* does the waitqueue check.
*/
smp_mb();
wake_up_bit(&bh->b_state, BH_Unshadow);
JBUFFER_TRACE(jh, "brelse shadowed buffer");
__brelse(bh);
}
J_ASSERT (commit_transaction->t_shadow_list == NULL);
jbd_debug(3, "JBD: commit phase 5\n");
/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
while (commit_transaction->t_log_list != NULL) {
struct buffer_head *bh;
jh = commit_transaction->t_log_list->b_tprev;
bh = jh2bh(jh);
if (buffer_locked(bh)) {
wait_on_buffer(bh);
goto wait_for_ctlbuf;
}
if (cond_resched())
goto wait_for_ctlbuf;
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
journal_unfile_buffer(journal, jh);
journal_put_journal_head(jh);
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
if (err)
journal_abort(journal, err);
jbd_debug(3, "JBD: commit phase 6\n");
/* All metadata is written, now write commit record and do cleanup */
spin_lock(&journal->j_state_lock);
J_ASSERT(commit_transaction->t_state == T_COMMIT);
commit_transaction->t_state = T_COMMIT_RECORD;
spin_unlock(&journal->j_state_lock);
if (journal_write_commit_record(journal, commit_transaction))
err = -EIO;
if (err)
journal_abort(journal, err);
/* End of a transaction! Finally, we can do checkpoint
processing: any buffers committed as a result of this
transaction can be removed from any checkpoint list it was on
before. */
jbd_debug(3, "JBD: commit phase 7\n");
J_ASSERT(commit_transaction->t_sync_datalist == NULL);
J_ASSERT(commit_transaction->t_buffers == NULL);
J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
J_ASSERT(commit_transaction->t_iobuf_list == NULL);
J_ASSERT(commit_transaction->t_shadow_list == NULL);
J_ASSERT(commit_transaction->t_log_list == NULL);
restart_loop:
/*
* As there are other places (journal_unmap_buffer()) adding buffers
* to this list we have to be careful and hold the j_list_lock.
*/
spin_lock(&journal->j_list_lock);
while (commit_transaction->t_forget) {
transaction_t *cp_transaction;
struct buffer_head *bh;
int try_to_free = 0;
jh = commit_transaction->t_forget;
spin_unlock(&journal->j_list_lock);
bh = jh2bh(jh);
/*
* Get a reference so that bh cannot be freed before we are
* done with it.
*/
get_bh(bh);
jbd_lock_bh_state(bh);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
jh->b_transaction == journal->j_running_transaction);
/*
* If there is undo-protected committed data against
* this buffer, then we can remove it now. If it is a
* buffer needing such protection, the old frozen_data
* field now points to a committed version of the
* buffer, so rotate that field to the new committed
* data.
*
* Otherwise, we can just throw away the frozen data now.
*/
if (jh->b_committed_data) {
jbd_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
}
} else if (jh->b_frozen_data) {
jbd_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
}
spin_lock(&journal->j_list_lock);
cp_transaction = jh->b_cp_transaction;
if (cp_transaction) {
JBUFFER_TRACE(jh, "remove from old cp transaction");
__journal_remove_checkpoint(jh);
}
/* Only re-checkpoint the buffer_head if it is marked
* dirty. If the buffer was added to the BJ_Forget list
* by journal_forget, it may no longer be dirty and
* there's no point in keeping a checkpoint record for
* it. */
/* A buffer which has been freed while still being
* journaled by a previous transaction may end up still
* being dirty here, but we want to avoid writing back
* that buffer in the future after the "add to orphan"
* operation has been committed. That's not only a performance
* gain, it also stops aliasing problems if the buffer is
* left behind for writeback and gets reallocated for another
* use in a different page. */
if (buffer_freed(bh) && !jh->b_next_transaction) {
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
}
if (buffer_jbddirty(bh)) {
JBUFFER_TRACE(jh, "add to new checkpointing trans");
__journal_insert_checkpoint(jh, commit_transaction);
if (is_journal_aborted(journal))
clear_buffer_jbddirty(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
/*
* The buffer on BJ_Forget list and not jbddirty means
* it has been freed by this transaction and hence it
* could not have been reallocated until this
* transaction has committed. *BUT* it could be
* reallocated once we have written all the data to
* disk and before we process the buffer on BJ_Forget
* list.
*/
if (!jh->b_next_transaction)
try_to_free = 1;
}
JBUFFER_TRACE(jh, "refile or unfile freed buffer");
__journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
if (try_to_free)
release_buffer_page(bh);
else
__brelse(bh);
cond_resched_lock(&journal->j_list_lock);
}
spin_unlock(&journal->j_list_lock);
/*
* This is a bit sleazy. We use j_list_lock to protect transition
* of a transaction into T_FINISHED state and calling
* __journal_drop_transaction(). Otherwise we could race with
* other checkpointing code processing the transaction...
*/
spin_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
/*
* Now recheck if some buffers did not get attached to the transaction
* while the lock was dropped...
*/
if (commit_transaction->t_forget) {
spin_unlock(&journal->j_list_lock);
spin_unlock(&journal->j_state_lock);
goto restart_loop;
}
/* Done with this transaction! */
jbd_debug(3, "JBD: commit phase 8\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
commit_transaction->t_state = T_FINISHED;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
journal->j_commit_sequence = commit_transaction->t_tid;
journal->j_committing_transaction = NULL;
commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
/*
* weight the commit time higher than the average time so we don't
* react too strongly to vast changes in commit time
*/
if (likely(journal->j_average_commit_time))
journal->j_average_commit_time = (commit_time*3 +
journal->j_average_commit_time) / 4;
else
journal->j_average_commit_time = commit_time;
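/*
 * Editor's worked example of the weighting above: with a previous
 * average of 100 ms and a new commit taking 200 ms, the new average
 * becomes (3 * 200 + 100) / 4 = 175 ms, i.e. the most recent commit
 * contributes three quarters of the result.
 */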
spin_unlock(&journal->j_state_lock);
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
__journal_drop_transaction(journal, commit_transaction);
} else {
if (journal->j_checkpoint_transactions == NULL) {
journal->j_checkpoint_transactions = commit_transaction;
commit_transaction->t_cpnext = commit_transaction;
commit_transaction->t_cpprev = commit_transaction;
} else {
commit_transaction->t_cpnext =
journal->j_checkpoint_transactions;
commit_transaction->t_cpprev =
commit_transaction->t_cpnext->t_cpprev;
commit_transaction->t_cpnext->t_cpprev =
commit_transaction;
commit_transaction->t_cpprev->t_cpnext =
commit_transaction;
}
}
spin_unlock(&journal->j_list_lock);
trace_jbd_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
wake_up(&journal->j_wait_done_commit);
}
| gpl-2.0 |
tweakos/HD-Kernel | drivers/net/wan/sbni.c | 3581 | 43201 | /* sbni.c: Granch SBNI12 leased line adapters driver for linux
*
* Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
*
* Previous versions were written by Yaroslav Polyakov,
* Alexey Zverev and Max Khon.
*
* Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
* double-channel, PCI and ISA modifications.
* More info and useful utilities to work with SBNI12 cards you can find
* at http://www.granch.com (English) or http://www.granch.ru (Russian)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License.
*
*
* 5.0.1 Jun 22 2001
* - Fixed bug in probe
* 5.0.0 Jun 06 2001
* - Driver was completely redesigned by Denis I.Timofeev,
* - now PCI/Dual, ISA/Dual (with single interrupt line) models are
* - supported
* 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
* - PCI cards support
* 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
* - Completely rebuilt all the packet storage system
* - to work in Ethernet-like style.
* 3.1.1 just fixed some bugs (5 aug 1999)
* 3.1.0 added balancing feature (26 apr 1999)
* 3.0.1 just fixed some bugs (14 apr 1999).
* 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
* - added pre-calculation for CRC, fixed bug with "len-2" frames,
* - removed outbound fragmentation (MTU=1000), written CRC-calculation
* - on asm, added work with hard_headers and now we have our own cache
* - for them, optionally supported word-interchange on some chipsets,
*
* Known problem: this driver wasn't tested on a multiprocessor machine.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <net/net_namespace.h>
#include <net/arp.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include "sbni.h"
/* device private data */
struct net_local {
struct timer_list watchdog;
spinlock_t lock;
struct sk_buff *rx_buf_p; /* receive buffer ptr */
struct sk_buff *tx_buf_p; /* transmit buffer ptr */
unsigned int framelen; /* current frame length */
unsigned int maxframe; /* maximum valid frame length */
unsigned int state;
unsigned int inppos, outpos; /* positions in rx/tx buffers */
/* transmitting frame number - from frames qty to 1 */
unsigned int tx_frameno;
/* expected number of next receiving frame */
unsigned int wait_frameno;
/* count of failed attempts to frame send - 32 attempts do before
error - while receiver tunes on opposite side of wire */
unsigned int trans_errors;
/* idle time; send pong when limit exceeded */
unsigned int timer_ticks;
/* fields used for receive level autoselection */
int delta_rxl;
unsigned int cur_rxl_index, timeout_rxl;
unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
struct sbni_csr1 csr1; /* current value of CSR1 */
struct sbni_in_stats in_stats; /* internal statistics */
struct net_device *second; /* for ISA/dual cards */
#ifdef CONFIG_SBNI_MULTILINE
struct net_device *master;
struct net_device *link;
#endif
};
static int sbni_card_probe( unsigned long );
static int sbni_pci_probe( struct net_device * );
static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
static int sbni_open( struct net_device * );
static int sbni_close( struct net_device * );
static netdev_tx_t sbni_start_xmit(struct sk_buff *,
struct net_device * );
static int sbni_ioctl( struct net_device *, struct ifreq *, int );
static void set_multicast_list( struct net_device * );
static irqreturn_t sbni_interrupt( int, void * );
static void handle_channel( struct net_device * );
static int recv_frame( struct net_device * );
static void send_frame( struct net_device * );
static int upload_data( struct net_device *,
unsigned, unsigned, unsigned, u32 );
static void download_data( struct net_device *, u32 * );
static void sbni_watchdog( unsigned long );
static void interpret_ack( struct net_device *, unsigned );
static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
static void indicate_pkt( struct net_device * );
static void card_start( struct net_device * );
static void prepare_to_send( struct sk_buff *, struct net_device * );
static void drop_xmit_queue( struct net_device * );
static void send_frame_header( struct net_device *, u32 * );
static int skip_tail( unsigned int, unsigned int, u32 );
static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
static void change_level( struct net_device * );
static void timeout_change_level( struct net_device * );
static u32 calc_crc32( u32, u8 *, u32 );
static struct sk_buff * get_rx_buf( struct net_device * );
static int sbni_init( struct net_device * );
#ifdef CONFIG_SBNI_MULTILINE
static int enslave( struct net_device *, struct net_device * );
static int emancipate( struct net_device * );
#endif
#ifdef __i386__
#define ASM_CRC 1
#endif
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
static int skip_pci_probe __initdata = 0;
static int scandone __initdata = 0;
static int num __initdata = 0;
static unsigned char rxl_tab[];
static u32 crc32tab[];
/* A list of all installed devices, for removing the driver module. */
static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
/* Lists of device's parameters */
static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
{ [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
#ifndef MODULE
typedef u32 iarr[];
static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
#endif
/* A zero-terminated list of I/O addresses to be probed on ISA bus */
static unsigned int netcard_portlist[ ] __initdata = {
0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
0 };
#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
/*
* Look for SBNI card which addr stored in dev->base_addr, if nonzero.
* Otherwise, look through PCI bus. If none PCI-card was found, scan ISA.
*/
static inline int __init
sbni_isa_probe( struct net_device *dev )
{
if( dev->base_addr > 0x1ff &&
request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
sbni_probe1( dev, dev->base_addr, dev->irq ) )
return 0;
else {
printk( KERN_ERR "sbni: base address 0x%lx is busy, or adapter "
"is malfunctional!\n", dev->base_addr );
return -ENODEV;
}
}
static const struct net_device_ops sbni_netdev_ops = {
.ndo_open = sbni_open,
.ndo_stop = sbni_close,
.ndo_start_xmit = sbni_start_xmit,
.ndo_set_multicast_list = set_multicast_list,
.ndo_do_ioctl = sbni_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static void __init sbni_devsetup(struct net_device *dev)
{
ether_setup( dev );
dev->netdev_ops = &sbni_netdev_ops;
}
int __init sbni_probe(int unit)
{
struct net_device *dev;
static unsigned version_printed __initdata = 0;
int err;
dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
if (!dev)
return -ENOMEM;
dev->netdev_ops = &sbni_netdev_ops;
sprintf(dev->name, "sbni%d", unit);
netdev_boot_setup_check(dev);
err = sbni_init(dev);
if (err) {
free_netdev(dev);
return err;
}
err = register_netdev(dev);
if (err) {
release_region( dev->base_addr, SBNI_IO_EXTENT );
free_netdev(dev);
return err;
}
if( version_printed++ == 0 )
printk( KERN_INFO "%s", version );
return 0;
}
static int __init sbni_init(struct net_device *dev)
{
int i;
if( dev->base_addr )
return sbni_isa_probe( dev );
/* otherwise we have to search for our adapter */
if( io[ num ] != -1 )
dev->base_addr = io[ num ],
dev->irq = irq[ num ];
else if( scandone || io[ 0 ] != -1 )
return -ENODEV;
/* if io[ num ] contains a non-zero address, then the card is on the ISA bus */
if( dev->base_addr )
return sbni_isa_probe( dev );
/* ...otherwise - scan PCI first */
if( !skip_pci_probe && !sbni_pci_probe( dev ) )
return 0;
if( io[ num ] == -1 ) {
/* Auto-scan stops when the first ISA card is found */
scandone = 1;
if( num > 0 )
return -ENODEV;
}
for( i = 0; netcard_portlist[ i ]; ++i ) {
int ioaddr = netcard_portlist[ i ];
if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
sbni_probe1( dev, ioaddr, 0 ))
return 0;
}
return -ENODEV;
}
static int __init
sbni_pci_probe( struct net_device *dev )
{
struct pci_dev *pdev = NULL;
while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
!= NULL ) {
int pci_irq_line;
unsigned long pci_ioaddr;
u16 subsys;
if( pdev->vendor != SBNI_PCI_VENDOR &&
pdev->device != SBNI_PCI_DEVICE )
continue;
pci_ioaddr = pci_resource_start( pdev, 0 );
pci_irq_line = pdev->irq;
/* Skip cards already found in previous calls */
if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
if (subsys != 2)
continue;
/* Dual adapter is present */
if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
dev->name ) )
continue;
}
if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
printk( KERN_WARNING
" WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!\n"
" You should use the PCI BIOS setup to assign a valid IRQ line.\n",
pci_irq_line );
/* avoid re-enabling dual adapters */
if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
release_region( pci_ioaddr, SBNI_IO_EXTENT );
pci_dev_put( pdev );
return -EIO;
}
if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
SET_NETDEV_DEV(dev, &pdev->dev);
/* not the best thing to do, but this is all messed up
for hotplug systems anyway... */
pci_dev_put( pdev );
return 0;
}
}
return -ENODEV;
}
static struct net_device * __init
sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
{
struct net_local *nl;
if( sbni_card_probe( ioaddr ) ) {
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
outb( 0, ioaddr + CSR0 );
if( irq < 2 ) {
unsigned long irq_mask;
irq_mask = probe_irq_on();
outb( EN_INT | TR_REQ, ioaddr + CSR0 );
outb( PR_RES, ioaddr + CSR1 );
mdelay(50);
irq = probe_irq_off(irq_mask);
outb( 0, ioaddr + CSR0 );
if( !irq ) {
printk( KERN_ERR "%s: can't detect device irq!\n",
dev->name );
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
} else if( irq == 2 )
irq = 9;
dev->irq = irq;
dev->base_addr = ioaddr;
/* Fill in sbni-specific dev fields. */
nl = netdev_priv(dev);
if( !nl ) {
printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
memset( nl, 0, sizeof(struct net_local) );
spin_lock_init( &nl->lock );
/* store MAC address (generate if that isn't known) */
*(__be16 *)dev->dev_addr = htons( 0x00ff );
*(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
((mac[num] ?
mac[num] :
(u32)((long)netdev_priv(dev))) & 0x00ffffff));
/* store link settings (speed, receive level ) */
nl->maxframe = DEFAULT_FRAME_LEN;
nl->csr1.rate = baud[ num ];
if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
/* autotune rxl */
nl->cur_rxl_index = DEF_RXL,
nl->delta_rxl = DEF_RXL_DELTA;
else
nl->delta_rxl = 0;
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
if( inb( ioaddr + CSR0 ) & 0x01 )
nl->state |= FL_SLOW_MODE;
printk( KERN_NOTICE "%s: ioaddr %#lx, irq %d, "
"MAC: 00:ff:01:%02x:%02x:%02x\n",
dev->name, dev->base_addr, dev->irq,
((u8 *) dev->dev_addr) [3],
((u8 *) dev->dev_addr) [4],
((u8 *) dev->dev_addr) [5] );
printk( KERN_NOTICE "%s: speed %d, receive level ", dev->name,
( (nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
/ (1 << nl->csr1.rate) );
if( nl->delta_rxl == 0 )
printk( "0x%x (fixed)\n", nl->cur_rxl_index );
else
printk( "(auto)\n");
#ifdef CONFIG_SBNI_MULTILINE
nl->master = dev;
nl->link = NULL;
#endif
sbni_cards[ num++ ] = dev;
return dev;
}
/* -------------------------------------------------------------------------- */
#ifdef CONFIG_SBNI_MULTILINE
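/*
* In MultiLine mode the transmit routine walks the master's nl->link
* chain and hands the skb to the first channel that is neither busy
* (tx_buf_p set) nor marked FL_LINE_DOWN; the single-line variant in
* the #else branch below simply queues the skb on this device.
*/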
static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
struct net_device *p;
netif_stop_queue( dev );
/* Look for an idle device in the list */
for( p = dev; p; ) {
struct net_local *nl = netdev_priv(p);
spin_lock( &nl->lock );
if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
p = nl->link;
spin_unlock( &nl->lock );
} else {
/* Idle dev is found */
prepare_to_send( skb, p );
spin_unlock( &nl->lock );
netif_start_queue( dev );
return NETDEV_TX_OK;
}
}
return NETDEV_TX_BUSY;
}
#else /* CONFIG_SBNI_MULTILINE */
static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
netif_stop_queue( dev );
spin_lock( &nl->lock );
prepare_to_send( skb, dev );
spin_unlock( &nl->lock );
return NETDEV_TX_OK;
}
#endif /* CONFIG_SBNI_MULTILINE */
/* -------------------------------------------------------------------------- */
/* interrupt handler */
/*
* SBNI12D-10 and -11/ISA boards in "common interrupt" mode cannot be
* treated as two independent single-channel devices.  Each channel looks
* like an Ethernet interface, but the interrupt handler must be shared.
* In practice, only the first ("master") channel's driver registers the
* handler; its struct net_local holds a pointer to the "slave" channel's
* struct net_local and services that channel's interrupts as well.
* The dev of every successfully attached ISA SBNI board is linked into a
* list.  When the next board's driver initializes, it scans this list;
* if it finds a dev with the same irq and an ioaddr differing by 4, it
* assumes that board is the "master".
*/
static irqreturn_t
sbni_interrupt( int irq, void *dev_id )
{
struct net_device *dev = dev_id;
struct net_local *nl = netdev_priv(dev);
int repeat;
spin_lock( &nl->lock );
if( nl->second )
spin_lock(&NET_LOCAL_LOCK(nl->second));
do {
repeat = 0;
if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
handle_channel( dev ),
repeat = 1;
if( nl->second && /* second channel present */
(inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
handle_channel( nl->second ),
repeat = 1;
} while( repeat );
if( nl->second )
spin_unlock(&NET_LOCAL_LOCK(nl->second));
spin_unlock( &nl->lock );
return IRQ_HANDLED;
}
static void
handle_channel( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
int req_ans;
unsigned char csr0;
#ifdef CONFIG_SBNI_MULTILINE
/* Lock the master device because we are going to change its local data */
if( nl->state & FL_SLAVE )
spin_lock(&NET_LOCAL_LOCK(nl->master));
#endif
outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
for(;;) {
csr0 = inb( ioaddr + CSR0 );
if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
break;
req_ans = !(nl->state & FL_PREV_OK);
if( csr0 & RC_RDY )
req_ans = recv_frame( dev );
/*
* TR_RDY always equals 1 here because we own the marker,
* and we set TR_REQ when we disabled interrupts
*/
csr0 = inb( ioaddr + CSR0 );
if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
printk( KERN_ERR "%s: internal error!\n", dev->name );
/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
if( req_ans || nl->tx_frameno != 0 )
send_frame( dev );
else
/* send marker without any data */
outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
}
outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
if( nl->state & FL_SLAVE )
spin_unlock(&NET_LOCAL_LOCK(nl->master));
#endif
}
/*
* Returns 1 if the received frame needs to be acknowledged.
* An empty frame received without errors won't be acknowledged.
*/
static int
recv_frame( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
unsigned long ioaddr = dev->base_addr;
u32 crc = CRC32_INITIAL;
unsigned framelen = 0, frameno, ack;
unsigned is_first, frame_ok = 0;
if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
frame_ok = framelen > 4
? upload_data( dev, framelen, frameno, is_first, crc )
: skip_tail( ioaddr, framelen, crc );
if( frame_ok )
interpret_ack( dev, ack );
}
outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
if( frame_ok ) {
nl->state |= FL_PREV_OK;
if( framelen > 4 )
nl->in_stats.all_rx_number++;
} else
nl->state &= ~FL_PREV_OK,
change_level( dev ),
nl->in_stats.all_rx_number++,
nl->in_stats.bad_rx_number++;
return !frame_ok || framelen > 4;
}
static void
send_frame( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
u32 crc = CRC32_INITIAL;
if( nl->state & FL_NEED_RESEND ) {
/* if the frame was sent but not ACK'ed - resend it */
if( nl->trans_errors ) {
--nl->trans_errors;
if( nl->framelen != 0 )
nl->in_stats.resend_tx_number++;
} else {
/* could not transmit even after many attempts */
#ifdef CONFIG_SBNI_MULTILINE
if( (nl->state & FL_SLAVE) || nl->link )
#endif
nl->state |= FL_LINE_DOWN;
drop_xmit_queue( dev );
goto do_send;
}
} else
nl->trans_errors = TR_ERROR_COUNT;
send_frame_header( dev, &crc );
nl->state |= FL_NEED_RESEND;
/*
* FL_NEED_RESEND will be cleared after ACK, but if an empty
* frame was sent it is cleared in prepare_to_send for the next frame
*/
if( nl->framelen ) {
download_data( dev, &crc );
nl->in_stats.all_tx_number++;
nl->state |= FL_WAIT_ACK;
}
outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
do_send:
outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
if( nl->tx_frameno )
/* next frame exists - we request card to send it */
outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
dev->base_addr + CSR0 );
}
/*
* Write the frame data into adapter's buffer memory, and calculate CRC.
* Do padding if necessary.
*/
static void
download_data( struct net_device *dev, u32 *crc_p )
{
struct net_local *nl = netdev_priv(dev);
struct sk_buff *skb = nl->tx_buf_p;
unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if the packet is too short, write some more bytes to pad it */
for( len = nl->framelen - len; len--; )
outb( 0, dev->base_addr + DAT ),
*crc_p = CRC32( 0, *crc_p );
}
static int
upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
unsigned is_first, u32 crc )
{
struct net_local *nl = netdev_priv(dev);
int frame_ok;
if( is_first )
nl->wait_frameno = frameno,
nl->inppos = 0;
if( nl->wait_frameno == frameno ) {
if( nl->inppos + framelen <= ETHER_MAX_LEN )
frame_ok = append_frame_to_pkt( dev, framelen, crc );
/*
* if the CRC is right but framelen is incorrect, then a transmitter
* error occurred... drop the entire packet
*/
else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
!= 0 )
nl->wait_frameno = 0,
nl->inppos = 0,
#ifdef CONFIG_SBNI_MULTILINE
nl->master->stats.rx_errors++,
nl->master->stats.rx_missed_errors++;
#else
dev->stats.rx_errors++,
dev->stats.rx_missed_errors++;
#endif
/* now skip all frames until is_first != 0 */
} else
frame_ok = skip_tail( dev->base_addr, framelen, crc );
if( is_first && !frame_ok )
/*
* Frame has been broken, but we had already stored
* is_first... Drop entire packet.
*/
nl->wait_frameno = 0,
#ifdef CONFIG_SBNI_MULTILINE
nl->master->stats.rx_errors++,
nl->master->stats.rx_crc_errors++;
#else
dev->stats.rx_errors++,
dev->stats.rx_crc_errors++;
#endif
return frame_ok;
}
static inline void
send_complete( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
#ifdef CONFIG_SBNI_MULTILINE
nl->master->stats.tx_packets++;
nl->master->stats.tx_bytes += nl->tx_buf_p->len;
#else
dev->stats.tx_packets++;
dev->stats.tx_bytes += nl->tx_buf_p->len;
#endif
dev_kfree_skb_irq( nl->tx_buf_p );
nl->tx_buf_p = NULL;
nl->outpos = 0;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl->framelen = 0;
}
static void
interpret_ack( struct net_device *dev, unsigned ack )
{
struct net_local *nl = netdev_priv(dev);
if( ack == FRAME_SENT_OK ) {
nl->state &= ~FL_NEED_RESEND;
if( nl->state & FL_WAIT_ACK ) {
nl->outpos += nl->framelen;
if( --nl->tx_frameno )
nl->framelen = min_t(unsigned int,
nl->maxframe,
nl->tx_buf_p->len - nl->outpos);
else
send_complete( dev ),
#ifdef CONFIG_SBNI_MULTILINE
netif_wake_queue( nl->master );
#else
netif_wake_queue( dev );
#endif
}
}
nl->state &= ~FL_WAIT_ACK;
}
/*
* Glue the received frame to the previous fragments of the packet.
* Indicate the packet once the last frame has been accepted.
*/
static int
append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
{
struct net_local *nl = netdev_priv(dev);
u8 *p;
if( nl->inppos + framelen > ETHER_MAX_LEN )
return 0;
if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
return 0;
p = nl->rx_buf_p->data + nl->inppos;
insb( dev->base_addr + DAT, p, framelen );
if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
return 0;
nl->inppos += framelen - 4;
if( --nl->wait_frameno == 0 ) /* last frame received */
indicate_pkt( dev );
return 1;
}
/*
* Prepare to start output on the adapter.
* The transmitter will actually be activated when the marker is accepted.
*/
static void
prepare_to_send( struct sk_buff *skb, struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
unsigned int len;
/* nl->tx_buf_p == NULL here! */
if( nl->tx_buf_p )
printk( KERN_ERR "%s: memory leak!\n", dev->name );
nl->outpos = 0;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
len = skb->len;
if( len < SBNI_MIN_LEN )
len = SBNI_MIN_LEN;
nl->tx_buf_p = skb;
nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
nl->framelen = len < nl->maxframe ? len : nl->maxframe;
outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
nl->master->trans_start = jiffies;
#else
dev->trans_start = jiffies;
#endif
}
static void
drop_xmit_queue( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
if( nl->tx_buf_p )
dev_kfree_skb_any( nl->tx_buf_p ),
nl->tx_buf_p = NULL,
#ifdef CONFIG_SBNI_MULTILINE
nl->master->stats.tx_errors++,
nl->master->stats.tx_carrier_errors++;
#else
dev->stats.tx_errors++,
dev->stats.tx_carrier_errors++;
#endif
nl->tx_frameno = 0;
nl->framelen = 0;
nl->outpos = 0;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
netif_start_queue( nl->master );
nl->master->trans_start = jiffies;
#else
netif_start_queue( dev );
dev->trans_start = jiffies;
#endif
}
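/*
* On-wire frame layout, as produced by send_frame_header() and
* download_data() below: SBNI_SIG, a 16-bit length/flags field (low byte
* first, carrying FRAME_FIRST, FRAME_RETRY and the ACK bits), the frame
* number, a reserved zero byte, the payload (if any) and a trailing
* CRC32.  The length field counts frame number + reserved + payload +
* CRC, i.e. framelen + 6.
*/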
static void
send_frame_header( struct net_device *dev, u32 *crc_p )
{
struct net_local *nl = netdev_priv(dev);
u32 crc = *crc_p;
u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
u8 value;
if( nl->state & FL_NEED_RESEND )
len_field |= FRAME_RETRY; /* non-first attempt... */
if( nl->outpos == 0 )
len_field |= FRAME_FIRST;
len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
outb( SBNI_SIG, dev->base_addr + DAT );
value = (u8) len_field;
outb( value, dev->base_addr + DAT );
crc = CRC32( value, crc );
value = (u8) (len_field >> 8);
outb( value, dev->base_addr + DAT );
crc = CRC32( value, crc );
outb( nl->tx_frameno, dev->base_addr + DAT );
crc = CRC32( nl->tx_frameno, crc );
outb( 0, dev->base_addr + DAT );
crc = CRC32( 0, crc );
*crc_p = crc;
}
/*
* if the frame tail isn't needed (incorrect number or received twice),
* it isn't stored, but the CRC is still calculated
*/
static int
skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
{
while( tail_len-- )
crc = CRC32( inb( ioaddr + DAT ), crc );
return crc == CRC32_REMAINDER;
}
/*
* Preliminary check that the frame header is correct; calculates its CRC
* and splits it into its fields.  On success, *framelen has had the frame
* number and reserved bytes subtracted, leaving the data + CRC byte count.
*/
static int
check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
u32 *is_first, u32 *crc_p )
{
u32 crc = *crc_p;
u8 value;
if( inb( ioaddr + DAT ) != SBNI_SIG )
return 0;
value = inb( ioaddr + DAT );
*framelen = (u32)value;
crc = CRC32( value, crc );
value = inb( ioaddr + DAT );
*framelen |= ((u32)value) << 8;
crc = CRC32( value, crc );
*ack = *framelen & FRAME_ACK_MASK;
*is_first = (*framelen & FRAME_FIRST) != 0;
if( (*framelen &= FRAME_LEN_MASK) < 6 ||
*framelen > SBNI_MAX_FRAME - 3 )
return 0;
value = inb( ioaddr + DAT );
*frameno = (u32)value;
crc = CRC32( value, crc );
crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
*framelen -= 2;
*crc_p = crc;
return 1;
}
static struct sk_buff *
get_rx_buf( struct net_device *dev )
{
/* +2 is to compensate for the alignment fixup below */
struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
if( !skb )
return NULL;
skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
return skb;
}
static void
indicate_pkt( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
struct sk_buff *skb = nl->rx_buf_p;
skb_put( skb, nl->inppos );
#ifdef CONFIG_SBNI_MULTILINE
skb->protocol = eth_type_trans( skb, nl->master );
netif_rx( skb );
++nl->master->stats.rx_packets;
nl->master->stats.rx_bytes += nl->inppos;
#else
skb->protocol = eth_type_trans( skb, dev );
netif_rx( skb );
++dev->stats.rx_packets;
dev->stats.rx_bytes += nl->inppos;
#endif
nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
}
/* -------------------------------------------------------------------------- */
/*
* Periodically checks wire activity and regenerates the marker if the
* connection has been inactive for a long time.
*/
static void
sbni_watchdog( unsigned long arg )
{
struct net_device *dev = (struct net_device *) arg;
struct net_local *nl = netdev_priv(dev);
struct timer_list *w = &nl->watchdog;
unsigned long flags;
unsigned char csr0;
spin_lock_irqsave( &nl->lock, flags );
csr0 = inb( dev->base_addr + CSR0 );
if( csr0 & RC_CHK ) {
if( nl->timer_ticks ) {
if( csr0 & (RC_RDY | BU_EMP) )
/* receiving not active */
nl->timer_ticks--;
} else {
nl->in_stats.timeout_number++;
if( nl->delta_rxl )
timeout_change_level( dev );
outb( *(u_char *)&nl->csr1 | PR_RES,
dev->base_addr + CSR1 );
csr0 = inb( dev->base_addr + CSR0 );
}
} else
nl->state &= ~FL_LINE_DOWN;
outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
init_timer( w );
w->expires = jiffies + SBNI_TIMEOUT;
w->data = arg;
w->function = sbni_watchdog;
add_timer( w );
spin_unlock_irqrestore( &nl->lock, flags );
}
static unsigned char rxl_tab[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
};
#define SIZE_OF_TIMEOUT_RXL_TAB 4
static unsigned char timeout_rxl_tab[] = {
0x03, 0x05, 0x08, 0x0b
};
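/*
* change_level() below steps through rxl_tab[] one entry at a time after
* bad frames, while timeout_change_level() cycles through the four
* coarse values above whenever the watchdog detects a receive timeout.
*/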
/* -------------------------------------------------------------------------- */
static void
card_start( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl->state |= FL_PREV_OK;
nl->inppos = nl->outpos = 0;
nl->wait_frameno = 0;
nl->tx_frameno = 0;
nl->framelen = 0;
outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
outb( EN_INT, dev->base_addr + CSR0 );
}
/* -------------------------------------------------------------------------- */
/* Receive level auto-selection */
static void
change_level( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
return;
if( nl->cur_rxl_index == 0 )
nl->delta_rxl = 1;
else if( nl->cur_rxl_index == 15 )
nl->delta_rxl = -1;
else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
nl->delta_rxl = -nl->delta_rxl;
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
inb( dev->base_addr + CSR0 ); /* needed for PCI cards */
outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
nl->cur_rxl_rcvd = 0;
}
static void
timeout_change_level( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
if( ++nl->timeout_rxl >= 4 )
nl->timeout_rxl = 0;
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
inb( dev->base_addr + CSR0 );
outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
nl->cur_rxl_rcvd = 0;
}
/* -------------------------------------------------------------------------- */
/*
* Open/initialize the board.
*/
static int
sbni_open( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
struct timer_list *w = &nl->watchdog;
/*
* For dual ISA adapters in "common irq" mode, we have to
* determine whether the primary or secondary channel is being initialized,
* and register the irq handler only in the first case.
*/
if( dev->base_addr < 0x400 ) { /* ISA only */
struct net_device **p = sbni_cards;
for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
if( (*p)->irq == dev->irq &&
((*p)->base_addr == dev->base_addr + 4 ||
(*p)->base_addr == dev->base_addr - 4) &&
(*p)->flags & IFF_UP ) {
((struct net_local *) (netdev_priv(*p)))
->second = dev;
printk( KERN_NOTICE "%s: using shared irq "
"with %s\n", dev->name, (*p)->name );
nl->state |= FL_SECONDARY;
goto handler_attached;
}
}
if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
printk( KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq );
return -EAGAIN;
}
handler_attached:
spin_lock( &nl->lock );
memset( &dev->stats, 0, sizeof(struct net_device_stats) );
memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
card_start( dev );
netif_start_queue( dev );
/* set timer watchdog */
init_timer( w );
w->expires = jiffies + SBNI_TIMEOUT;
w->data = (unsigned long) dev;
w->function = sbni_watchdog;
add_timer( w );
spin_unlock( &nl->lock );
return 0;
}
static int
sbni_close( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
if( nl->second && nl->second->flags & IFF_UP ) {
printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
nl->second->name );
return -EBUSY;
}
#ifdef CONFIG_SBNI_MULTILINE
if( nl->state & FL_SLAVE )
emancipate( dev );
else
while( nl->link ) /* it's master device! */
emancipate( nl->link );
#endif
spin_lock( &nl->lock );
nl->second = NULL;
drop_xmit_queue( dev );
netif_stop_queue( dev );
del_timer( &nl->watchdog );
outb( 0, dev->base_addr + CSR0 );
if( !(nl->state & FL_SECONDARY) )
free_irq( dev->irq, dev );
nl->state &= FL_SECONDARY;
spin_unlock( &nl->lock );
return 0;
}
/*
Valid combinations in CSR0 (for probing):
VALID_DECODER 0000,0011,1011,1010
; 0 ; -
TR_REQ ; 1 ; +
TR_RDY ; 2 ; -
TR_RDY TR_REQ ; 3 ; +
BU_EMP ; 4 ; +
BU_EMP TR_REQ ; 5 ; +
BU_EMP TR_RDY ; 6 ; -
BU_EMP TR_RDY TR_REQ ; 7 ; +
RC_RDY ; 8 ; +
RC_RDY TR_REQ ; 9 ; +
RC_RDY TR_RDY ; 10 ; -
RC_RDY TR_RDY TR_REQ ; 11 ; -
RC_RDY BU_EMP ; 12 ; -
RC_RDY BU_EMP TR_REQ ; 13 ; -
RC_RDY BU_EMP TR_RDY ; 14 ; -
RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
*/
#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
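/*
* Bit n of VALID_DECODER is set exactly for the rows marked '+' in the
* table above; sbni_card_probe() below shifts the (adjusted) CSR0 value
* right by four and tests the corresponding bit to decide whether a live
* adapter responds at the given address.
*/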
static int
sbni_card_probe( unsigned long ioaddr )
{
unsigned char csr0;
csr0 = inb( ioaddr + CSR0 );
if( csr0 != 0xff && csr0 != 0x00 ) {
csr0 &= ~EN_INT;
if( csr0 & BU_EMP )
csr0 |= EN_INT;
if( VALID_DECODER & (1 << (csr0 >> 4)) )
return 0;
}
return -ENODEV;
}
/* -------------------------------------------------------------------------- */
static int
sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
{
struct net_local *nl = netdev_priv(dev);
struct sbni_flags flags;
int error = 0;
#ifdef CONFIG_SBNI_MULTILINE
struct net_device *slave_dev;
char slave_name[ 8 ];
#endif
switch( cmd ) {
case SIOCDEVGETINSTATS :
if (copy_to_user( ifr->ifr_data, &nl->in_stats,
sizeof(struct sbni_in_stats) ))
error = -EFAULT;
break;
case SIOCDEVRESINSTATS :
if (!capable(CAP_NET_ADMIN))
return -EPERM;
memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
break;
case SIOCDEVGHWSTATE :
flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
flags.rate = nl->csr1.rate;
flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
flags.rxl = nl->cur_rxl_index;
flags.fixed_rxl = nl->delta_rxl == 0;
if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
error = -EFAULT;
break;
case SIOCDEVSHWSTATE :
if (!capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock( &nl->lock );
flags = *(struct sbni_flags*) &ifr->ifr_ifru;
if( flags.fixed_rxl )
nl->delta_rxl = 0,
nl->cur_rxl_index = flags.rxl;
else
nl->delta_rxl = DEF_RXL_DELTA,
nl->cur_rxl_index = DEF_RXL;
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl->csr1.rate = flags.rate;
outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
spin_unlock( &nl->lock );
break;
#ifdef CONFIG_SBNI_MULTILINE
case SIOCDEVENSLAVE :
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
return -EFAULT;
slave_dev = dev_get_by_name(&init_net, slave_name );
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
printk( KERN_ERR "%s: trying to enslave non-active "
"device %s\n", dev->name, slave_name );
return -EPERM;
}
return enslave( dev, slave_dev );
case SIOCDEVEMANSIPATE :
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return emancipate( dev );
#endif /* CONFIG_SBNI_MULTILINE */
default :
return -EOPNOTSUPP;
}
return error;
}
#ifdef CONFIG_SBNI_MULTILINE
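/*
* MultiLine support: several SBNI channels can be bundled behind one
* "master" net_device.  enslave() links a slave into the master's
* nl->link chain and redirects its statistics to the master's counters;
* emancipate() unlinks it again and restarts its own queue.
*/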
static int
enslave( struct net_device *dev, struct net_device *slave_dev )
{
struct net_local *nl = netdev_priv(dev);
struct net_local *snl = netdev_priv(slave_dev);
if( nl->state & FL_SLAVE ) /* This isn't master or free device */
return -EBUSY;
if( snl->state & FL_SLAVE ) /* That was already enslaved */
return -EBUSY;
spin_lock( &nl->lock );
spin_lock( &snl->lock );
/* append to list */
snl->link = nl->link;
nl->link = slave_dev;
snl->master = dev;
snl->state |= FL_SLAVE;
/* Summary statistics of MultiLine operation are stored
in the master's counters */
memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
netif_stop_queue( slave_dev );
netif_wake_queue( dev ); /* Now we are able to transmit */
spin_unlock( &snl->lock );
spin_unlock( &nl->lock );
printk( KERN_NOTICE "%s: slave device (%s) attached.\n",
dev->name, slave_dev->name );
return 0;
}
static int
emancipate( struct net_device *dev )
{
struct net_local *snl = netdev_priv(dev);
struct net_device *p = snl->master;
struct net_local *nl = netdev_priv(p);
if( !(snl->state & FL_SLAVE) )
return -EINVAL;
spin_lock( &nl->lock );
spin_lock( &snl->lock );
drop_xmit_queue( dev );
/* exclude from list */
for(;;) { /* must be in list */
struct net_local *t = netdev_priv(p);
if( t->link == dev ) {
t->link = snl->link;
break;
}
p = t->link;
}
snl->link = NULL;
snl->master = dev;
snl->state &= ~FL_SLAVE;
netif_start_queue( dev );
spin_unlock( &snl->lock );
spin_unlock( &nl->lock );
dev_put( dev );
return 0;
}
#endif
static void
set_multicast_list( struct net_device *dev )
{
return; /* sbni always operates in promiscuous mode */
}
#ifdef MODULE
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(baud, int, NULL, 0);
module_param_array(rxl, int, NULL, 0);
module_param_array(mac, int, NULL, 0);
module_param(skip_pci_probe, bool, 0);
MODULE_LICENSE("GPL");
int __init init_module( void )
{
struct net_device *dev;
int err;
while( num < SBNI_MAX_NUM_CARDS ) {
dev = alloc_netdev(sizeof(struct net_local),
"sbni%d", sbni_devsetup);
if( !dev)
break;
sprintf( dev->name, "sbni%d", num );
err = sbni_init(dev);
if (err) {
free_netdev(dev);
break;
}
if( register_netdev( dev ) ) {
release_region( dev->base_addr, SBNI_IO_EXTENT );
free_netdev( dev );
break;
}
}
return *sbni_cards ? 0 : -ENODEV;
}
void
cleanup_module(void)
{
int i;
for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) {
struct net_device *dev = sbni_cards[i];
if (dev != NULL) {
unregister_netdev(dev);
release_region(dev->base_addr, SBNI_IO_EXTENT);
free_netdev(dev);
}
}
}
#else /* MODULE */
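/*
* Parse the "sbni=(...)" boot parameter.  Within one card's group the
* fields are separated by ',' in the order io,irq,baud,rxl,mac (matching
* dest[] above); groups for successive cards are separated by ';'.
* A purely hypothetical example: sbni=(0x210,5;0x214,5) would describe
* two cards at 0x210 and 0x214, both on IRQ 5.
*/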
static int __init
sbni_setup( char *p )
{
int n, parm;
if( *p++ != '(' )
goto bad_param;
for( n = 0, parm = 0; *p && n < 8; ) {
(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
if( *p == ';' )
++p, ++n, parm = 0;
else if( *p++ != ',' )
break;
else
if( ++parm >= 5 )
break;
}
bad_param:
printk( KERN_ERR "Error in sbni kernel parameter!\n" );
return 0;
}
__setup( "sbni=", sbni_setup );
#endif /* MODULE */
/* -------------------------------------------------------------------------- */
#ifdef ASM_CRC
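/*
* Hand-coded i386 variant of calc_crc32(): it folds four input bytes per
* loop iteration through crc32tab using the usual reflected table update
* (crc = tab[(crc ^ byte) & 0xff] ^ (crc >> 8)), then finishes the
* remaining 0-3 bytes.  It is meant to be functionally equivalent to the
* byte-at-a-time C fallback in the #else branch.
*/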
static u32
calc_crc32( u32 crc, u8 *p, u32 len )
{
register u32 _crc;
_crc = crc;
__asm__ __volatile__ (
"xorl %%ebx, %%ebx\n"
"movl %2, %%esi\n"
"movl %3, %%ecx\n"
"movl $crc32tab, %%edi\n"
"shrl $2, %%ecx\n"
"jz 1f\n"
".align 4\n"
"0:\n"
"movb %%al, %%bl\n"
"movl (%%esi), %%edx\n"
"shrl $8, %%eax\n"
"xorb %%dl, %%bl\n"
"shrl $8, %%edx\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb %%dl, %%bl\n"
"shrl $8, %%edx\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb %%dl, %%bl\n"
"movb %%dh, %%dl\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb %%dl, %%bl\n"
"addl $4, %%esi\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"decl %%ecx\n"
"jnz 0b\n"
"1:\n"
"movl %3, %%ecx\n"
"andl $3, %%ecx\n"
"jz 2f\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb (%%esi), %%bl\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"decl %%ecx\n"
"jz 2f\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb 1(%%esi), %%bl\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"decl %%ecx\n"
"jz 2f\n"
"movb %%al, %%bl\n"
"shrl $8, %%eax\n"
"xorb 2(%%esi), %%bl\n"
"xorl (%%edi,%%ebx,4), %%eax\n"
"2:\n"
: "=a" (_crc)
: "0" (_crc), "g" (p), "g" (len)
: "bx", "cx", "dx", "si", "di"
);
return _crc;
}
#else /* ASM_CRC */
static u32
calc_crc32( u32 crc, u8 *p, u32 len )
{
while( len-- )
crc = CRC32( *p++, crc );
return crc;
}
#endif /* ASM_CRC */
static u32 crc32tab[] __attribute__ ((aligned(8))) = {
0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
};
| gpl-2.0 |
garwynn/D710BST_GC01_Kernel | arch/um/sys-x86_64/signal.c | 3837 | 8160 | /*
* Copyright (C) 2003 PathScale, Inc.
* Copyright (C) 2003 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/kernel.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include "frame_kern.h"
#include "skas.h"
void copy_sc(struct uml_pt_regs *regs, void *from)
{
struct sigcontext *sc = from;
#define GETREG(regs, regno, sc, regname) \
(regs)->gp[(regno) / sizeof(unsigned long)] = (sc)->regname
GETREG(regs, R8, sc, r8);
GETREG(regs, R9, sc, r9);
GETREG(regs, R10, sc, r10);
GETREG(regs, R11, sc, r11);
GETREG(regs, R12, sc, r12);
GETREG(regs, R13, sc, r13);
GETREG(regs, R14, sc, r14);
GETREG(regs, R15, sc, r15);
GETREG(regs, RDI, sc, di);
GETREG(regs, RSI, sc, si);
GETREG(regs, RBP, sc, bp);
GETREG(regs, RBX, sc, bx);
GETREG(regs, RDX, sc, dx);
GETREG(regs, RAX, sc, ax);
GETREG(regs, RCX, sc, cx);
GETREG(regs, RSP, sc, sp);
GETREG(regs, RIP, sc, ip);
GETREG(regs, EFLAGS, sc, flags);
GETREG(regs, CS, sc, cs);
#undef GETREG
}
static int copy_sc_from_user(struct pt_regs *regs,
struct sigcontext __user *from,
struct _fpstate __user *fpp)
{
struct user_i387_struct fp;
int err = 0;
#define GETREG(regs, regno, sc, regname) \
__get_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
&(sc)->regname)
err |= GETREG(regs, R8, from, r8);
err |= GETREG(regs, R9, from, r9);
err |= GETREG(regs, R10, from, r10);
err |= GETREG(regs, R11, from, r11);
err |= GETREG(regs, R12, from, r12);
err |= GETREG(regs, R13, from, r13);
err |= GETREG(regs, R14, from, r14);
err |= GETREG(regs, R15, from, r15);
err |= GETREG(regs, RDI, from, di);
err |= GETREG(regs, RSI, from, si);
err |= GETREG(regs, RBP, from, bp);
err |= GETREG(regs, RBX, from, bx);
err |= GETREG(regs, RDX, from, dx);
err |= GETREG(regs, RAX, from, ax);
err |= GETREG(regs, RCX, from, cx);
err |= GETREG(regs, RSP, from, sp);
err |= GETREG(regs, RIP, from, ip);
err |= GETREG(regs, EFLAGS, from, flags);
err |= GETREG(regs, CS, from, cs);
if (err)
return 1;
#undef GETREG
err = copy_from_user(&fp, fpp, sizeof(struct user_i387_struct));
if (err)
return 1;
err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fp_registers failed, errno = %d\n",
-err);
return 1;
}
return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
struct _fpstate __user *to_fp, struct pt_regs *regs,
unsigned long mask, unsigned long sp)
{
struct faultinfo * fi = &current->thread.arch.faultinfo;
struct user_i387_struct fp;
int err = 0;
err |= __put_user(0, &to->gs);
err |= __put_user(0, &to->fs);
#define PUTREG(regs, regno, sc, regname) \
__put_user((regs)->regs.gp[(regno) / sizeof(unsigned long)], \
&(sc)->regname)
err |= PUTREG(regs, RDI, to, di);
err |= PUTREG(regs, RSI, to, si);
err |= PUTREG(regs, RBP, to, bp);
/*
* Must use original RSP, which is passed in, rather than what's in
* the pt_regs, because that's already been updated to point at the
* signal frame.
*/
err |= __put_user(sp, &to->sp);
err |= PUTREG(regs, RBX, to, bx);
err |= PUTREG(regs, RDX, to, dx);
err |= PUTREG(regs, RCX, to, cx);
err |= PUTREG(regs, RAX, to, ax);
err |= PUTREG(regs, R8, to, r8);
err |= PUTREG(regs, R9, to, r9);
err |= PUTREG(regs, R10, to, r10);
err |= PUTREG(regs, R11, to, r11);
err |= PUTREG(regs, R12, to, r12);
err |= PUTREG(regs, R13, to, r13);
err |= PUTREG(regs, R14, to, r14);
err |= PUTREG(regs, R15, to, r15);
err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */
err |= __put_user(fi->cr2, &to->cr2);
err |= __put_user(fi->error_code, &to->err);
err |= __put_user(fi->trap_no, &to->trapno);
err |= PUTREG(regs, RIP, to, ip);
err |= PUTREG(regs, EFLAGS, to, flags);
#undef PUTREG
err |= __put_user(mask, &to->oldmask);
if (err)
return 1;
err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_to_user - save_fp_registers "
"failed, errno = %d\n", -err);
return 1;
}
if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
return 1;
return err;
}
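/*
* Layout of the frame pushed onto the user stack when delivering a
* signal: the return address ("pretcode", normally the SA_RESTORER
* stub), the ucontext holding the saved sigcontext, the siginfo, and the
* FP state that uc.uc_mcontext.fpstate is made to point at.
*/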
struct rt_sigframe
{
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
struct _fpstate fpstate;
};
int setup_signal_stack_si(unsigned long stack_top, int sig,
struct k_sigaction *ka, struct pt_regs * regs,
siginfo_t *info, sigset_t *set)
{
struct rt_sigframe __user *frame;
unsigned long save_sp = PT_REGS_RSP(regs);
int err = 0;
struct task_struct *me = current;
frame = (struct rt_sigframe __user *)
round_down(stack_top - sizeof(struct rt_sigframe), 16);
/* Subtract 128 for a red zone and 8 for proper alignment */
frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128 - 8);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto out;
if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto out;
}
/*
* Update SP now because the page fault handler refuses to extend
* the stack if the faulting address is too far below the current
* SP, which frame now certainly is. If there's an error, the original
* value is restored on the way out.
* When writing the sigcontext to the stack, we have to write the
* original value, so that's passed to copy_sc_to_user, which does
* the right thing with it.
*/
PT_REGS_RSP(regs) = (unsigned long) frame;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(save_sp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
set->sig[0], save_sp);
err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
if (sizeof(*set) == 16) {
__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
}
else
err |= __copy_to_user(&frame->uc.uc_sigmask, set,
sizeof(*set));
/*
* Set up to return from userspace. If provided, use a stub
* already in userspace.
*/
/* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER)
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
else
/* could use a vstub here */
goto restore_sp;
if (err)
goto restore_sp;
/* Set up registers for signal handler */
{
struct exec_domain *ed = current_thread_info()->exec_domain;
if (unlikely(ed && ed->signal_invmap && sig < 32))
sig = ed->signal_invmap[sig];
}
PT_REGS_RDI(regs) = sig;
/* In case the signal handler was declared without prototypes */
PT_REGS_RAX(regs) = 0;
/*
* This also works for non SA_SIGINFO handlers because they expect the
* next argument after the signal number on the stack.
*/
PT_REGS_RSI(regs) = (unsigned long) &frame->info;
PT_REGS_RDX(regs) = (unsigned long) &frame->uc;
PT_REGS_RIP(regs) = (unsigned long) ka->sa.sa_handler;
out:
return err;
restore_sp:
PT_REGS_RSP(regs) = save_sp;
return err;
}
long sys_rt_sigreturn(struct pt_regs *regs)
{
unsigned long sp = PT_REGS_SP(&current->thread.regs);
struct rt_sigframe __user *frame =
(struct rt_sigframe __user *)(sp - 8);
struct ucontext __user *uc = &frame->uc;
sigset_t set;
if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
goto segfault;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext,
&frame->fpstate))
goto segfault;
/* Avoid ERESTART handling */
PT_REGS_SYSCALL_NR(&current->thread.regs) = -1;
return PT_REGS_SYSCALL_RET(&current->thread.regs);
segfault:
force_sig(SIGSEGV, current);
return 0;
}
| gpl-2.0 |
InfinitiveOS-Devices/android_kernel_oneplus_msm8974 | tools/perf/perf.c | 3837 | 12280 | /*
* perf.c
*
* Performance analysis utility.
*
* This is the main hub from which the sub-commands (perf stat,
* perf top, perf record, perf report, etc.) are started.
*/
#include "builtin.h"
#include "util/exec_cmd.h"
#include "util/cache.h"
#include "util/quote.h"
#include "util/run-command.h"
#include "util/parse-events.h"
#include "util/debugfs.h"
const char perf_usage_string[] =
"perf [--version] [--help] COMMAND [ARGS]";
const char perf_more_info_string[] =
"See 'perf help COMMAND' for more information on a specific command.";
int use_browser = -1;
static int use_pager = -1;
struct pager_config {
const char *cmd;
int val;
};
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (!prefixcmp(var, "pager.") && !strcmp(var + 6, c->cmd))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no pager", 1 for "use pager", and -1 for "not specified" */
int check_pager_config(const char *cmd)
{
struct pager_config c;
c.cmd = cmd;
c.val = -1;
perf_config(pager_command_config, &c);
return c.val;
}
static int tui_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd))
c->val = perf_config_bool(var, value);
return 0;
}
/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */
static int check_tui_config(const char *cmd)
{
struct pager_config c;
c.cmd = cmd;
c.val = -1;
perf_config(tui_command_config, &c);
return c.val;
}
static void commit_pager_choice(void)
{
switch (use_pager) {
case 0:
setenv("PERF_PAGER", "cat", 1);
break;
case 1:
/* setup_pager(); */
break;
default:
break;
}
}
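/*
* Strip the global "perf [--option ...]" flags that may precede the
* sub-command name.  Returns a count of the handled options and sets
* *envchanged when an option modified the environment, which expanded
* aliases are not allowed to do (see handle_alias()).
*/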
static int handle_options(const char ***argv, int *argc, int *envchanged)
{
int handled = 0;
while (*argc > 0) {
const char *cmd = (*argv)[0];
if (cmd[0] != '-')
break;
/*
* For legacy reasons, the "version" and "help"
* commands can be written with "--" prepended
* to make them look like flags.
*/
if (!strcmp(cmd, "--help") || !strcmp(cmd, "--version"))
break;
/*
* Check remaining flags.
*/
if (!prefixcmp(cmd, CMD_EXEC_PATH)) {
cmd += strlen(CMD_EXEC_PATH);
if (*cmd == '=')
perf_set_argv_exec_path(cmd + 1);
else {
puts(perf_exec_path());
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
puts(system_path(PERF_HTML_PATH));
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
} else if (!strcmp(cmd, "--no-pager")) {
use_pager = 0;
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--perf-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --perf-dir.\n");
usage(perf_usage_string);
}
setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
handled++;
} else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --work-tree.\n");
usage(perf_usage_string);
}
setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--debugfs-dir")) {
if (*argc < 2) {
fprintf(stderr, "No directory given for --debugfs-dir.\n");
usage(perf_usage_string);
}
debugfs_set_path((*argv)[1]);
if (envchanged)
*envchanged = 1;
(*argv)++;
(*argc)--;
} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
if (envchanged)
*envchanged = 1;
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
usage(perf_usage_string);
}
(*argv)++;
(*argc)--;
handled++;
}
return handled;
}
static int handle_alias(int *argcp, const char ***argv)
{
int envchanged = 0, ret = 0, saved_errno = errno;
int count, option_count;
const char **new_argv;
const char *alias_command;
char *alias_string;
alias_command = (*argv)[0];
alias_string = alias_lookup(alias_command);
if (alias_string) {
if (alias_string[0] == '!') {
if (*argcp > 1) {
struct strbuf buf;
strbuf_init(&buf, PATH_MAX);
strbuf_addstr(&buf, alias_string);
sq_quote_argv(&buf, (*argv) + 1, PATH_MAX);
free(alias_string);
alias_string = buf.buf;
}
ret = system(alias_string + 1);
if (ret >= 0 && WIFEXITED(ret) &&
WEXITSTATUS(ret) != 127)
exit(WEXITSTATUS(ret));
die("Failed to run '%s' when expanding alias '%s'",
alias_string + 1, alias_command);
}
count = split_cmdline(alias_string, &new_argv);
if (count < 0)
die("Bad alias.%s string", alias_command);
option_count = handle_options(&new_argv, &count, &envchanged);
if (envchanged)
die("alias '%s' changes environment variables\n"
"You can use '!perf' in the alias to do this.",
alias_command);
memmove(new_argv - option_count, new_argv,
count * sizeof(char *));
new_argv -= option_count;
if (count < 1)
die("empty alias for %s", alias_command);
if (!strcmp(alias_command, new_argv[0]))
die("recursive alias: %s", alias_command);
new_argv = realloc(new_argv, sizeof(char *) *
(count + *argcp + 1));
/* insert after command name */
memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
new_argv[count + *argcp] = NULL;
*argv = new_argv;
*argcp += count - 1;
ret = 1;
}
errno = saved_errno;
return ret;
}
const char perf_version_string[] = PERF_VERSION;
#define RUN_SETUP (1<<0)
#define USE_PAGER (1<<1)
/*
* require working tree to be present -- anything that uses this needs
* RUN_SETUP for reading from the configuration file.
*/
#define NEED_WORK_TREE (1<<2)
struct cmd_struct {
const char *cmd;
int (*fn)(int, const char **, const char *);
int option;
};
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status;
struct stat st;
const char *prefix;
prefix = NULL;
if (p->option & RUN_SETUP)
prefix = NULL; /* setup_perf_directory(); */
if (use_browser == -1)
use_browser = check_tui_config(p->cmd);
if (use_pager == -1 && p->option & RUN_SETUP)
use_pager = check_pager_config(p->cmd);
if (use_pager == -1 && p->option & USE_PAGER)
use_pager = 1;
commit_pager_choice();
status = p->fn(argc, argv, prefix);
exit_browser(status);
if (status)
return status & 0xff;
/* Somebody closed stdout? */
if (fstat(fileno(stdout), &st))
return 0;
/* Ignore write errors for pipes and sockets.. */
if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
return 0;
/* Check for ENOSPC and EIO errors.. */
if (fflush(stdout))
die("write failure on standard output: %s", strerror(errno));
if (ferror(stdout))
die("unknown write failure on standard output");
if (fclose(stdout))
die("close failed on standard output: %s", strerror(errno));
return 0;
}
static void handle_internal_command(int argc, const char **argv)
{
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
{ "list", cmd_list, 0 },
{ "record", cmd_record, 0 },
{ "report", cmd_report, 0 },
{ "bench", cmd_bench, 0 },
{ "stat", cmd_stat, 0 },
{ "periodic", cmd_periodic, 0 },
{ "timechart", cmd_timechart, 0 },
{ "top", cmd_top, 0 },
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
{ "script", cmd_script, 0 },
{ "sched", cmd_sched, 0 },
{ "probe", cmd_probe, 0 },
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
{ "inject", cmd_inject, 0 },
};
unsigned int i;
static const char ext[] = STRIP_EXTENSION;
if (sizeof(ext) > 1) {
i = strlen(argv[0]) - strlen(ext);
if (i > 0 && !strcmp(argv[0] + i, ext)) {
char *argv0 = strdup(argv[0]);
argv[0] = cmd = argv0;
argv0[i] = '\0';
}
}
/* Turn "perf cmd --help" into "perf help cmd" */
if (argc > 1 && !strcmp(argv[1], "--help")) {
argv[1] = argv[0];
argv[0] = cmd = "help";
}
for (i = 0; i < ARRAY_SIZE(commands); i++) {
struct cmd_struct *p = commands+i;
if (strcmp(p->cmd, cmd))
continue;
exit(run_builtin(p, argc, argv));
}
}
static void execv_dashed_external(const char **argv)
{
struct strbuf cmd = STRBUF_INIT;
const char *tmp;
int status;
strbuf_addf(&cmd, "perf-%s", argv[0]);
/*
* argv[0] must be the perf command, but the argv array
* belongs to the caller, and may be reused in
* subsequent loop iterations. Save argv[0] and
* restore it on error.
*/
tmp = argv[0];
argv[0] = cmd.buf;
/*
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
status = run_command_v_opt(argv, 0);
if (status != -ERR_RUN_COMMAND_EXEC) {
if (IS_RUN_COMMAND_ERR(status))
die("unable to run '%s'", argv[0]);
exit(-status);
}
errno = ENOENT; /* as if we called execvp */
argv[0] = tmp;
strbuf_release(&cmd);
}
static int run_argv(int *argcp, const char ***argv)
{
int done_alias = 0;
while (1) {
/* See if it's an internal command */
handle_internal_command(*argcp, *argv);
/* .. then try the external ones */
execv_dashed_external(*argv);
/* It could be an alias -- this works around the insanity
* of overriding "perf log" with "perf show" by having
* alias.log = show
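* which in config-file terms would be written as (hypothetical example):
*   [alias]
*           log = show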
*/
if (done_alias || !handle_alias(argcp, argv))
break;
done_alias = 1;
}
return done_alias;
}
static void pthread__block_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_BLOCK, &set, NULL);
}
void pthread__unblock_sigwinch(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGWINCH);
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}
int main(int argc, const char **argv)
{
const char *cmd;
cmd = perf_extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
/* get debugfs mount point from /proc/mounts */
debugfs_mount(NULL);
/*
* "perf-xxxx" is the same as "perf xxxx", but we obviously:
*
* - cannot take flags in between the "perf" and the "xxxx".
* - cannot execute it externally (since it would just do
* the same thing over again)
*
* So we just directly call the internal command handler, and
* die if that one cannot handle it.
*/
if (!prefixcmp(cmd, "perf-")) {
cmd += 5;
argv[0] = cmd;
handle_internal_command(argc, argv);
die("cannot handle %s internally", cmd);
}
/* Look for flags.. */
argv++;
argc--;
handle_options(&argv, &argc, NULL);
commit_pager_choice();
set_buildid_dir();
if (argc > 0) {
if (!prefixcmp(argv[0], "--"))
argv[0] += 2;
} else {
/* The user didn't specify a command; give them help */
printf("\n usage: %s\n\n", perf_usage_string);
list_common_cmds_help();
printf("\n %s\n\n", perf_more_info_string);
exit(1);
}
cmd = argv[0];
/*
* We use PATH to find perf commands, but we prepend some higher
* precedence paths: the "--exec-path" option, the PERF_EXEC_PATH
* environment, and the $(perfexecdir) from the Makefile at build
* time.
*/
setup_path();
/*
* Block SIGWINCH notifications so that the thread that wants it can
* unblock and get syscalls like select interrupted instead of waiting
* forever while the signal goes to some other non interested thread.
*/
pthread__block_sigwinch();
while (1) {
static int done_help;
static int was_alias;
was_alias = run_argv(&argc, &argv);
if (errno != ENOENT)
break;
if (was_alias) {
fprintf(stderr, "Expansion of alias '%s' failed; "
"'%s' is not a perf-command\n",
cmd, argv[0]);
exit(1);
}
if (!done_help) {
cmd = argv[0] = help_unknown_cmd(cmd);
done_help = 1;
} else
break;
}
fprintf(stderr, "Failed to run command '%s': %s\n",
cmd, strerror(errno));
return 1;
}
| gpl-2.0 |
renaudallard/nexus5_kernel | drivers/net/ethernet/i825xx/sun3_82586.c | 4861 | 32823 | /*
* Sun3 i82586 Ethernet driver
*
* Cloned from ni52.c for the Sun3 by Sam Creasey (sammy@sammy.net)
*
* Original copyright follows:
* --------------------------
*
* net-3-driver for the NI5210 card (i82586 Ethernet chip)
*
* This is an extension to the Linux operating system, and is covered by the
* same Gnu Public License that covers that work.
*
* Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
* Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
* --------------------------
*
* Consult ni52.c for further notes from the original driver.
*
* This incarnation currently supports the OBIO version of the i82586 chip
* used in certain sun3 models. It should be fairly doable to expand this
* to support VME if I should ever acquire such a board.
*
*/
static int debuglevel = 0; /* debug-printk 0: off 1: a few 2: more */
static int automatic_resume = 0; /* experimental .. better should be zero */
static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */
static int fifo=0x8; /* don't change */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/sun3mmu.h>
#include <asm/dvma.h>
#include <asm/byteorder.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "sun3_82586.h"
#define DRV_NAME "sun3_82586"
#define DEBUG /* debug on */
#define SYSBUSVAL 0 /* 16 Bit */
#define SUN3_82586_TOTAL_SIZE PAGE_SIZE
#define sun3_attn586() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_ATTEN; *(volatile unsigned char *)(dev->base_addr) &= ~IEOB_ATTEN;}
#define sun3_reset586() {*(volatile unsigned char *)(dev->base_addr) = 0; udelay(100); *(volatile unsigned char *)(dev->base_addr) = IEOB_NORSET;}
#define sun3_disint() {*(volatile unsigned char *)(dev->base_addr) &= ~IEOB_IENAB;}
#define sun3_enaint() {*(volatile unsigned char *)(dev->base_addr) |= IEOB_IENAB;}
#define sun3_active() {*(volatile unsigned char *)(dev->base_addr) |= (IEOB_IENAB|IEOB_ONAIR|IEOB_NORSET);}
#define make32(ptr16) (p->memtop + (swab16((unsigned short) (ptr16))) )
#define make24(ptr32) (char *)swab32(( ((unsigned long) (ptr32)) - p->base))
#define make16(ptr32) (swab16((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )))
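/*
* The shared DVMA memory is seen by the i82586 through byte-swapped
* 24-bit bus addresses and 16-bit offsets relative to the board's
* shared-memory base (p->memtop): make24() and make16() translate a
* kernel pointer into those chip-side forms, and make32() maps a 16-bit
* chip offset back to a kernel pointer.
*/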
/******************* how to calculate the buffers *****************************
* IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
* --------------- in a different (more stable?) mode. Only in this mode it's
* possible to configure the driver with 'NO_NOPCOMMANDS'
sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
sizeof(rfd) = 24; sizeof(rbd) = 12;
sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
sizeof(nop_cmd) = 8;
* if you don't know the driver, you'd better not change these values: */
#define RECV_BUFF_SIZE 1536 /* slightly oversized */
#define XMIT_BUFF_SIZE 1536 /* slightly oversized */
#define NUM_XMIT_BUFFS 1 /* config for 32K shmem */
#define NUM_RECV_BUFFS_8 4 /* config for 32K shared mem */
#define NUM_RECV_BUFFS_16 9 /* config for 32K shared mem */
#define NUM_RECV_BUFFS_32 16 /* config for 32K shared mem */
#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
/**************************************************************************/
/* different DELAYs */
#define DELAY(x) mdelay(32 * x);
#define DELAY_16(); { udelay(16); }
#define DELAY_18(); { udelay(4); }
/* wait for command with timeout: */
#define WAIT_4_SCB_CMD() \
{ int i; \
for(i=0;i<16384;i++) { \
if(!p->scb->cmd_cuc) break; \
DELAY_18(); \
if(i == 16383) { \
printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
#define WAIT_4_SCB_CMD_RUC() { int i; \
for(i=0;i<16384;i++) { \
if(!p->scb->cmd_ruc) break; \
DELAY_18(); \
if(i == 16383) { \
printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
if(!p->reseted) { p->reseted = 1; sun3_reset586(); } } } }
#define WAIT_4_STAT_COMPL(addr) { int i; \
for(i=0;i<32767;i++) { \
if(swab16((addr)->cmd_status) & STAT_COMPL) break; \
DELAY_16(); DELAY_16(); } }
static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
static int sun3_82586_open(struct net_device *dev);
static int sun3_82586_close(struct net_device *dev);
static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void sun3_82586_timeout(struct net_device *dev);
#if 0
static void sun3_82586_dump(struct net_device *,void *);
#endif
/* helper-functions */
static int init586(struct net_device *dev);
static int check586(struct net_device *dev,char *where,unsigned size);
static void alloc586(struct net_device *dev);
static void startrecv586(struct net_device *dev);
static void *alloc_rfa(struct net_device *dev,void *ptr);
static void sun3_82586_rcv_int(struct net_device *dev);
static void sun3_82586_xmt_int(struct net_device *dev);
static void sun3_82586_rnr_int(struct net_device *dev);
struct priv
{
unsigned long base;
char *memtop;
long int lock;
int reseted;
volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first;
volatile struct scp_struct *scp; /* volatile is important */
volatile struct iscp_struct *iscp; /* volatile is important */
volatile struct scb_struct *scb; /* volatile is important */
volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
#if (NUM_XMIT_BUFFS == 1)
volatile struct nop_cmd_struct *nop_cmds[2];
#else
volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
#endif
volatile int nop_point,num_recv_buffs;
volatile char *xmit_cbuffs[NUM_XMIT_BUFFS];
volatile int xmit_count,xmit_last;
};
/**********************************************
* close device
*/
static int sun3_82586_close(struct net_device *dev)
{
free_irq(dev->irq, dev);
sun3_reset586(); /* the hard way to stop the receiver */
netif_stop_queue(dev);
return 0;
}
/**********************************************
* open device
*/
static int sun3_82586_open(struct net_device *dev)
{
int ret;
sun3_disint();
alloc586(dev);
init586(dev);
startrecv586(dev);
sun3_enaint();
ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
if (ret)
{
sun3_reset586();
return ret;
}
netif_start_queue(dev);
return 0; /* most done by init */
}
/**********************************************
* Check to see if there's an 82586 out there.
*/
static int check586(struct net_device *dev,char *where,unsigned size)
{
struct priv pb;
struct priv *p = &pb;
char *iscp_addr;
int i;
p->base = (unsigned long) dvma_btov(0);
p->memtop = (char *)dvma_btov((unsigned long)where);
p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
memset((char *)p->scp,0, sizeof(struct scp_struct));
for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */
if(((char *)p->scp)[i])
return 0;
p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */
if(p->scp->sysbus != SYSBUSVAL)
return 0;
iscp_addr = (char *)dvma_btov((unsigned long)where);
p->iscp = (struct iscp_struct *) iscp_addr;
memset((char *)p->iscp,0, sizeof(struct iscp_struct));
p->scp->iscp = make24(p->iscp);
p->iscp->busy = 1;
sun3_reset586();
sun3_attn586();
DELAY(1); /* wait a while... */
if(p->iscp->busy) /* i82586 clears 'busy' after successful init */
return 0;
return 1;
}
/******************************************************************
 * set iscp at the right place, called by sun3_82586_probe1(), sun3_82586_open()
 * and set_multicast_list() (via alloc586).
*/
static void alloc586(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
sun3_reset586();
DELAY(1);
p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
p->iscp = (struct iscp_struct *) dvma_btov(dev->mem_start);
p->scb = (struct scb_struct *) ((char *)p->iscp + sizeof(struct iscp_struct));
memset((char *) p->iscp,0,sizeof(struct iscp_struct));
memset((char *) p->scp ,0,sizeof(struct scp_struct));
p->scp->iscp = make24(p->iscp);
p->scp->sysbus = SYSBUSVAL;
p->iscp->scb_offset = make16(p->scb);
p->iscp->scb_base = make24(dvma_btov(dev->mem_start));
p->iscp->busy = 1;
sun3_reset586();
sun3_attn586();
DELAY(1);
if(p->iscp->busy)
printk("%s: Init-Problems (alloc).\n",dev->name);
p->reseted = 0;
memset((char *)p->scb,0,sizeof(struct scb_struct));
}
struct net_device * __init sun3_82586_probe(int unit)
{
struct net_device *dev;
unsigned long ioaddr;
static int found = 0;
int err = -ENOMEM;
/* check that this machine has an onboard 82586 */
switch(idprom->id_machtype) {
case SM_SUN3|SM_3_160:
case SM_SUN3|SM_3_260:
/* these machines have 82586 */
break;
default:
return ERR_PTR(-ENODEV);
}
if (found)
return ERR_PTR(-ENODEV);
ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE);
if (!ioaddr)
return ERR_PTR(-ENOMEM);
found = 1;
dev = alloc_etherdev(sizeof(struct priv));
if (!dev)
goto out;
if (unit >= 0) {
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
}
dev->irq = IE_IRQ;
dev->base_addr = ioaddr;
err = sun3_82586_probe1(dev, ioaddr);
if (err)
goto out1;
err = register_netdev(dev);
if (err)
goto out2;
return dev;
out2:
release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
out1:
free_netdev(dev);
out:
iounmap((void __iomem *)ioaddr);
return ERR_PTR(err);
}
static const struct net_device_ops sun3_82586_netdev_ops = {
.ndo_open = sun3_82586_open,
.ndo_stop = sun3_82586_close,
.ndo_start_xmit = sun3_82586_send_packet,
.ndo_set_rx_mode = set_multicast_list,
.ndo_tx_timeout = sun3_82586_timeout,
.ndo_get_stats = sun3_82586_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
};
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
{
int i, size, retval;
if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
/* copy in the ethernet address from the prom */
for(i = 0; i < 6 ; i++)
dev->dev_addr[i] = idprom->id_ethaddr[i];
printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
/*
* check (or search) IO-Memory, 32K
*/
size = 0x8000;
dev->mem_start = (unsigned long)dvma_malloc_align(0x8000, 0x1000);
dev->mem_end = dev->mem_start + size;
if(size != 0x2000 && size != 0x4000 && size != 0x8000) {
printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 or 0x8000 bytes.\n",dev->name,size);
retval = -ENODEV;
goto out;
}
if(!check586(dev,(char *) dev->mem_start,size)) {
printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size);
retval = -ENODEV;
goto out;
}
((struct priv *)netdev_priv(dev))->memtop =
(char *)dvma_btov(dev->mem_start);
((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0);
alloc586(dev);
/* set number of receive-buffs according to memsize */
if(size == 0x2000)
((struct priv *)netdev_priv(dev))->num_recv_buffs =
NUM_RECV_BUFFS_8;
else if(size == 0x4000)
((struct priv *)netdev_priv(dev))->num_recv_buffs =
NUM_RECV_BUFFS_16;
else
((struct priv *)netdev_priv(dev))->num_recv_buffs =
NUM_RECV_BUFFS_32;
printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
dev->netdev_ops = &sun3_82586_netdev_ops;
dev->watchdog_timeo = HZ/20;
dev->if_port = 0;
return 0;
out:
release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
return retval;
}
static int init586(struct net_device *dev)
{
void *ptr;
int i,result=0;
struct priv *p = netdev_priv(dev);
volatile struct configure_cmd_struct *cfg_cmd;
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct netdev_hw_addr *ha;
int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
cfg_cmd->cmd_status = 0;
cfg_cmd->cmd_cmd = swab16(CMD_CONFIGURE | CMD_LAST);
cfg_cmd->cmd_link = 0xffff;
cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */
cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */
cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */
cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */
cfg_cmd->priority = 0x00;
cfg_cmd->ifs = 0x60;
cfg_cmd->time_low = 0x00;
cfg_cmd->time_high = 0xf2;
cfg_cmd->promisc = 0;
if(dev->flags & IFF_ALLMULTI) {
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if(num_addrs > len) {
printk("%s: switching to promisc. mode\n",dev->name);
cfg_cmd->promisc = 1;
}
}
if(dev->flags&IFF_PROMISC)
cfg_cmd->promisc = 1;
cfg_cmd->carr_coll = 0x00;
p->scb->cbl_offset = make16(cfg_cmd);
p->scb->cmd_ruc = 0;
p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
sun3_attn586();
WAIT_4_STAT_COMPL(cfg_cmd);
if((swab16(cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK))
{
printk("%s: configure command failed: %x\n",dev->name,swab16(cfg_cmd->cmd_status));
return 1;
}
/*
* individual address setup
*/
ias_cmd = (struct iasetup_cmd_struct *)ptr;
ias_cmd->cmd_status = 0;
ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
ias_cmd->cmd_link = 0xffff;
memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
sun3_attn586();
WAIT_4_STAT_COMPL(ias_cmd);
if((swab16(ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) {
printk("%s (82586): individual address setup command failed: %04x\n",dev->name,swab16(ias_cmd->cmd_status));
return 1;
}
/*
 * TDR, wire check .. e.g. no resistor etc.
*/
tdr_cmd = (struct tdr_cmd_struct *)ptr;
tdr_cmd->cmd_status = 0;
tdr_cmd->cmd_cmd = swab16(CMD_TDR | CMD_LAST);
tdr_cmd->cmd_link = 0xffff;
tdr_cmd->status = 0;
p->scb->cbl_offset = make16(tdr_cmd);
p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
sun3_attn586();
WAIT_4_STAT_COMPL(tdr_cmd);
if(!(swab16(tdr_cmd->cmd_status) & STAT_COMPL))
{
printk("%s: Problems while running the TDR.\n",dev->name);
}
else
{
DELAY_16(); /* wait for result */
result = swab16(tdr_cmd->status);
p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
sun3_attn586(); /* ack the interrupts */
if(result & TDR_LNK_OK)
;
else if(result & TDR_XCVR_PRB)
printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name);
else if(result & TDR_ET_OPN)
printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
else if(result & TDR_ET_SRT)
{
if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */
printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK);
}
else
printk("%s: TDR: Unknown status %04x\n",dev->name,result);
}
/*
* Multicast setup
*/
if(num_addrs && !(dev->flags & IFF_PROMISC) )
{
mc_cmd = (struct mcsetup_cmd_struct *) ptr;
mc_cmd->cmd_status = 0;
mc_cmd->cmd_cmd = swab16(CMD_MCSETUP | CMD_LAST);
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = swab16(num_addrs * 6);
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy((char *) mc_cmd->mc_list[i++],
ha->addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_STAT_COMPL(mc_cmd);
if( (swab16(mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
printk("%s: Can't apply multicast-address-list.\n",dev->name);
}
/*
* alloc nop/xmit-cmds
*/
#if (NUM_XMIT_BUFFS == 1)
for(i=0;i<2;i++)
{
p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
p->nop_cmds[i]->cmd_status = 0;
p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
}
#else
for(i=0;i<NUM_XMIT_BUFFS;i++)
{
p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
p->nop_cmds[i]->cmd_cmd = swab16(CMD_NOP);
p->nop_cmds[i]->cmd_status = 0;
p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
}
#endif
ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
*/
for(i=0;i<NUM_XMIT_BUFFS;i++)
{
p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/
ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
if((void *)ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
}
memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct));
memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct));
p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]);
p->xmit_cmds[i]->cmd_status = swab16(STAT_COMPL);
p->xmit_cmds[i]->cmd_cmd = swab16(CMD_XMIT | CMD_INT);
p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i]));
p->xmit_buffs[i]->next = 0xffff;
p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i]));
}
p->xmit_count = 0;
p->xmit_last = 0;
#ifndef NO_NOPCOMMANDS
p->nop_point = 0;
#endif
/*
* 'start transmitter'
*/
#ifndef NO_NOPCOMMANDS
p->scb->cbl_offset = make16(p->nop_cmds[0]);
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
#else
p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]);
p->xmit_cmds[0]->cmd_cmd = swab16(CMD_XMIT | CMD_SUSPEND | CMD_INT);
#endif
/*
* ack. interrupts
*/
p->scb->cmd_cuc = p->scb->cus & STAT_MASK;
sun3_attn586();
DELAY_16();
sun3_enaint();
sun3_active();
return 0;
}
/******************************************************
* This is a helper routine for sun3_82586_rnr_int() and init586().
* It sets up the Receive Frame Area (RFA).
*/
static void *alloc_rfa(struct net_device *dev,void *ptr)
{
volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
volatile struct rbd_struct *rbd;
int i;
struct priv *p = netdev_priv(dev);
memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
p->rfd_first = rfd;
for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) {
rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
rfd[i].rbd_offset = 0xffff;
}
rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
rbd = (struct rbd_struct *) ptr;
ptr = (void *) (rbd + p->num_recv_buffs);
/* clr descriptors */
memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs));
for(i=0;i<p->num_recv_buffs;i++)
{
rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs));
rbd[i].size = swab16(RECV_BUFF_SIZE);
rbd[i].buffer = make24(ptr);
ptr = (char *) ptr + RECV_BUFF_SIZE;
}
p->rfd_top = p->rfd_first;
p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
p->scb->rfa_offset = make16(p->rfd_first);
p->rfd_first->rbd_offset = make16(rbd);
return ptr;
}
/**************************************************
* Interrupt Handler ...
*/
static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id)
{
struct net_device *dev = dev_id;
unsigned short stat;
int cnt=0;
struct priv *p;
if (!dev) {
printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq);
return IRQ_NONE;
}
p = netdev_priv(dev);
if(debuglevel > 1)
printk("I");
WAIT_4_SCB_CMD(); /* wait for last command */
while((stat=p->scb->cus & STAT_MASK))
{
p->scb->cmd_cuc = stat;
sun3_attn586();
if(stat & STAT_FR) /* received a frame */
sun3_82586_rcv_int(dev);
if(stat & STAT_RNR) /* RU went 'not ready' */
{
printk("(R)");
if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */
{
WAIT_4_SCB_CMD();
p->scb->cmd_ruc = RUC_RESUME;
sun3_attn586();
WAIT_4_SCB_CMD_RUC();
}
else
{
printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
sun3_82586_rnr_int(dev);
}
}
if(stat & STAT_CX) /* command with I-bit set complete */
sun3_82586_xmt_int(dev);
#ifndef NO_NOPCOMMANDS
if(stat & STAT_CNA) /* CU went 'not ready' */
{
if(netif_running(dev))
printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus);
}
#endif
if(debuglevel > 1)
printk("%d",cnt++);
WAIT_4_SCB_CMD(); /* wait for ack. (sun3_82586_xmt_int can be faster than ack!!) */
if(p->scb->cmd_cuc) /* timed out? */
{
printk("%s: Acknowledge timed out.\n",dev->name);
sun3_disint();
break;
}
}
if(debuglevel > 1)
printk("i");
return IRQ_HANDLED;
}
/*******************************************************
* receive-interrupt
*/
static void sun3_82586_rcv_int(struct net_device *dev)
{
int status,cnt=0;
unsigned short totlen;
struct sk_buff *skb;
struct rbd_struct *rbd;
struct priv *p = netdev_priv(dev);
if(debuglevel > 0)
printk("R");
for(;(status = p->rfd_top->stat_high) & RFD_COMPL;)
{
rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
if(status & RFD_OK) /* frame received without error? */
{
if( (totlen = swab16(rbd->status)) & RBD_LAST) /* the first and the last buffer? */
{
totlen &= RBD_MASK; /* length of this frame */
rbd->status = 0;
skb = netdev_alloc_skb(dev, totlen + 2);
if(skb != NULL)
{
skb_reserve(skb,2);
skb_put(skb,totlen);
skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
else
dev->stats.rx_dropped++;
}
else
{
int rstat;
/* free all RBD's until RBD_LAST is set */
totlen = 0;
while(!((rstat=swab16(rbd->status)) & RBD_LAST))
{
totlen += rstat & RBD_MASK;
if(!rstat)
{
printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
break;
}
rbd->status = 0;
rbd = (struct rbd_struct *) make32(rbd->next);
}
totlen += rstat & RBD_MASK;
rbd->status = 0;
printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
dev->stats.rx_dropped++;
}
}
else /* frame !(ok), only with 'save-bad-frames' */
{
printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
dev->stats.rx_errors++;
}
p->rfd_top->stat_high = 0;
p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */
p->rfd_top->rbd_offset = 0xffff;
p->rfd_last->last = 0; /* delete RFD_SUSP */
p->rfd_last = p->rfd_top;
p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
p->scb->rfa_offset = make16(p->rfd_top);
if(debuglevel > 0)
printk("%d",cnt++);
}
if(automatic_resume)
{
WAIT_4_SCB_CMD();
p->scb->cmd_ruc = RUC_RESUME;
sun3_attn586();
WAIT_4_SCB_CMD_RUC();
}
#ifdef WAIT_4_BUSY
{
int i;
for(i=0;i<1024;i++)
{
if(p->rfd_top->status)
break;
DELAY_16();
if(i == 1023)
printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name);
}
}
#endif
#if 0
if(!at_least_one)
{
int i;
volatile struct rfd_struct *rfds=p->rfd_top;
volatile struct rbd_struct *rbds;
printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
for(i=0;i< (p->num_recv_buffs+4);i++)
{
rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
printk("%04x:%04x ",rfds->status,rbds->status);
rfds = (struct rfd_struct *) make32(rfds->next);
}
printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
}
old_at_least = at_least_one;
#endif
if(debuglevel > 0)
printk("r");
}
/**********************************************************
* handle 'Receiver went not ready'.
*/
static void sun3_82586_rnr_int(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
dev->stats.rx_errors++;
WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */
sun3_attn586();
WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */
alloc_rfa(dev,(char *)p->rfd_first);
/* maybe add a check here, before restarting the RU */
startrecv586(dev); /* restart RU */
printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus);
}
/**********************************************************
* handle xmit - interrupt
*/
static void sun3_82586_xmt_int(struct net_device *dev)
{
int status;
struct priv *p = netdev_priv(dev);
if(debuglevel > 0)
printk("X");
status = swab16(p->xmit_cmds[p->xmit_last]->cmd_status);
if(!(status & STAT_COMPL))
printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name);
if(status & STAT_OK)
{
dev->stats.tx_packets++;
dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
}
else
{
dev->stats.tx_errors++;
if(status & TCMD_LATECOLL) {
printk("%s: late collision detected.\n",dev->name);
dev->stats.collisions++;
}
else if(status & TCMD_NOCARRIER) {
dev->stats.tx_carrier_errors++;
printk("%s: no carrier detected.\n",dev->name);
}
else if(status & TCMD_LOSTCTS)
printk("%s: loss of CTS detected.\n",dev->name);
else if(status & TCMD_UNDERRUN) {
dev->stats.tx_fifo_errors++;
printk("%s: DMA underrun detected.\n",dev->name);
}
else if(status & TCMD_MAXCOLL) {
printk("%s: Max. collisions exceeded.\n",dev->name);
dev->stats.collisions += 16;
}
}
#if (NUM_XMIT_BUFFS > 1)
if( (++p->xmit_last) == NUM_XMIT_BUFFS)
p->xmit_last = 0;
#endif
netif_wake_queue(dev);
}
/***********************************************************
* (re)start the receiver
*/
static void startrecv586(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
WAIT_4_SCB_CMD();
WAIT_4_SCB_CMD_RUC();
p->scb->rfa_offset = make16(p->rfd_first);
p->scb->cmd_ruc = RUC_START;
sun3_attn586(); /* start cmd. */
WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */
}
static void sun3_82586_timeout(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
#ifndef NO_NOPCOMMANDS
if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
{
netif_wake_queue(dev);
#ifdef DEBUG
printk("%s: strange ... timeout with CU active?!?\n",dev->name);
printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
#endif
p->scb->cmd_cuc = CUC_ABORT;
sun3_attn586();
WAIT_4_SCB_CMD();
p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
dev->trans_start = jiffies; /* prevent tx timeout */
return;
}
#endif
{
#ifdef DEBUG
printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
sun3_82586_close(dev);
sun3_82586_open(dev);
}
dev->trans_start = jiffies; /* prevent tx timeout */
}
/******************************************************
* send frame
*/
static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len,i;
#ifndef NO_NOPCOMMANDS
int next_nop;
#endif
struct priv *p = netdev_priv(dev);
if(skb->len > XMIT_BUFF_SIZE)
{
printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
return NETDEV_TX_OK;
}
netif_stop_queue(dev);
#if(NUM_XMIT_BUFFS > 1)
if(test_and_set_bit(0,(void *) &p->lock)) {
printk("%s: Queue was locked\n",dev->name);
return NETDEV_TX_BUSY;
}
else
#endif
{
len = skb->len;
if (len < ETH_ZLEN) {
memset((void *)p->xmit_cbuffs[p->xmit_count], 0,
ETH_ZLEN);
len = ETH_ZLEN;
}
skb_copy_from_linear_data(skb, (void *)p->xmit_cbuffs[p->xmit_count], skb->len);
#if (NUM_XMIT_BUFFS == 1)
# ifdef NO_NOPCOMMANDS
#ifdef DEBUG
if(p->scb->cus & CU_ACTIVE)
{
printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name);
printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,swab16(p->xmit_cmds[0]->cmd_status));
}
#endif
p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
for(i=0;i<16;i++)
{
p->xmit_cmds[0]->cmd_status = 0;
WAIT_4_SCB_CMD();
if( (p->scb->cus & CU_STATUS) == CU_SUSPEND)
p->scb->cmd_cuc = CUC_RESUME;
else
{
p->scb->cbl_offset = make16(p->xmit_cmds[0]);
p->scb->cmd_cuc = CUC_START;
}
sun3_attn586();
if(!i)
dev_kfree_skb(skb);
WAIT_4_SCB_CMD();
if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
break;
if(p->xmit_cmds[0]->cmd_status)
break;
if(i==15)
printk("%s: Can't start transmit-command.\n",dev->name);
}
# else
next_nop = (p->nop_point + 1) & 0x1;
p->xmit_buffs[0]->size = swab16(TBD_LAST | len);
p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
= make16((p->nop_cmds[next_nop]));
p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
p->nop_point = next_nop;
dev_kfree_skb(skb);
# endif
#else
p->xmit_buffs[p->xmit_count]->size = swab16(TBD_LAST | len);
if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS )
next_nop = 0;
p->xmit_cmds[p->xmit_count]->cmd_status = 0;
/* linkpointer of xmit-command already points to next nop cmd */
p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop]));
p->nop_cmds[next_nop]->cmd_status = 0;
p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
p->xmit_count = next_nop;
{
unsigned long flags;
local_irq_save(flags);
if(p->xmit_count != p->xmit_last)
netif_wake_queue(dev);
p->lock = 0;
local_irq_restore(flags);
}
dev_kfree_skb(skb);
#endif
}
return NETDEV_TX_OK;
}
/*******************************************
* Someone wanna have the statistics
*/
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
{
struct priv *p = netdev_priv(dev);
unsigned short crc,aln,rsc,ovrn;
crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */
p->scb->crc_errs = 0;
aln = swab16(p->scb->aln_errs);
p->scb->aln_errs = 0;
rsc = swab16(p->scb->rsc_errs);
p->scb->rsc_errs = 0;
ovrn = swab16(p->scb->ovrn_errs);
p->scb->ovrn_errs = 0;
dev->stats.rx_crc_errors += crc;
dev->stats.rx_fifo_errors += ovrn;
dev->stats.rx_frame_errors += aln;
dev->stats.rx_dropped += rsc;
return &dev->stats;
}
/********************************************************
* Set MC list ..
*/
static void set_multicast_list(struct net_device *dev)
{
netif_stop_queue(dev);
sun3_disint();
alloc586(dev);
init586(dev);
startrecv586(dev);
sun3_enaint();
netif_wake_queue(dev);
}
#if 0
/*
* DUMP .. we expect a not running CMD unit and enough space
*/
void sun3_82586_dump(struct net_device *dev,void *ptr)
{
struct priv *p = netdev_priv(dev);
struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
int i;
p->scb->cmd_cuc = CUC_ABORT;
sun3_attn586();
WAIT_4_SCB_CMD();
WAIT_4_SCB_CMD_RUC();
dump_cmd->cmd_status = 0;
dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
dump_cmd->dump_offset = make16((dump_cmd + 1));
dump_cmd->cmd_link = 0xffff;
p->scb->cbl_offset = make16(dump_cmd);
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_STAT_COMPL(dump_cmd);
if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
printk("%s: Can't get dump information.\n",dev->name);
for(i=0;i<170;i++) {
printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
if(i % 24 == 23)
printk("\n");
}
printk("\n");
}
#endif
| gpl-2.0 |
naufragoweb/android_kernel_samsung_kyleopen | drivers/watchdog/w83697hf_wdt.c | 4861 | 10188 | /*
* w83697hf/hg WDT driver
*
* (c) Copyright 2006 Samuel Tardieu <sam@rfc1149.net>
* (c) Copyright 2006 Marcus Junker <junker@anduras.de>
*
* Based on w83627hf_wdt.c which is based on advantechwdt.c
* which is based on wdt.c.
* Original copyright messages:
*
* (c) Copyright 2003 Pádraig Brady <P@draigBrady.com>
*
* (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
*
* (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Marcus Junker nor ANDURAS AG admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/system.h>
#define WATCHDOG_NAME "w83697hf/hg WDT"
#define PFX WATCHDOG_NAME ": "
#define WATCHDOG_TIMEOUT 60 /* 60 sec default timeout */
#define WATCHDOG_EARLY_DISABLE 1 /* Disable until userland kicks in */
static unsigned long wdt_is_open;
static char expect_close;
static DEFINE_SPINLOCK(io_lock);
/* You must set this - there is no sane way to probe for this board. */
static int wdt_io = 0x2e;
module_param(wdt_io, int, 0);
MODULE_PARM_DESC(wdt_io,
"w83697hf/hg WDT io port (default 0x2e, 0 = autodetect)");
static int timeout = WATCHDOG_TIMEOUT; /* in seconds */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in seconds. 1<= timeout <=255 (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int early_disable = WATCHDOG_EARLY_DISABLE;
module_param(early_disable, int, 0);
MODULE_PARM_DESC(early_disable,
"Watchdog gets disabled at boot time (default="
__MODULE_STRING(WATCHDOG_EARLY_DISABLE) ")");
/*
* Kernel methods.
*/
#define W83697HF_EFER (wdt_io + 0) /* Extended Function Enable Register */
#define W83697HF_EFIR (wdt_io + 0) /* Extended Function Index Register
(same as EFER) */
#define W83697HF_EFDR (wdt_io + 1) /* Extended Function Data Register */
static inline void w83697hf_unlock(void)
{
outb_p(0x87, W83697HF_EFER); /* Enter extended function mode */
outb_p(0x87, W83697HF_EFER); /* Again according to manual */
}
static inline void w83697hf_lock(void)
{
outb_p(0xAA, W83697HF_EFER); /* Leave extended function mode */
}
/*
* The three functions w83697hf_get_reg(), w83697hf_set_reg() and
 * w83697hf_write_timeout() must be called with the chip unlocked, i.e. while it
 * is in extended function mode (between w83697hf_unlock() and w83697hf_lock()).
*/
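/* Typical calling pattern (sketch only -- the helpers below wrap exactly this;
 * 'secs' is a placeholder, the driver itself uses the 'timeout' parameter):
 *
 *	w83697hf_unlock();		enter extended function mode
 *	w83697hf_set_reg(0x07, 0x08);	select logical device 8 (GPIO2/WDT)
 *	w83697hf_set_reg(0xF4, secs);	reload the timeout counter (CRF4)
 *	w83697hf_lock();		leave extended function mode
 */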
static unsigned char w83697hf_get_reg(unsigned char reg)
{
outb_p(reg, W83697HF_EFIR);
return inb_p(W83697HF_EFDR);
}
static void w83697hf_set_reg(unsigned char reg, unsigned char data)
{
outb_p(reg, W83697HF_EFIR);
outb_p(data, W83697HF_EFDR);
}
static void w83697hf_write_timeout(int timeout)
{
/* Write Timeout counter to CRF4 */
w83697hf_set_reg(0xF4, timeout);
}
static void w83697hf_select_wdt(void)
{
w83697hf_unlock();
w83697hf_set_reg(0x07, 0x08); /* Switch to logic device 8 (GPIO2) */
}
static inline void w83697hf_deselect_wdt(void)
{
w83697hf_lock();
}
static void w83697hf_init(void)
{
unsigned char bbuf;
w83697hf_select_wdt();
bbuf = w83697hf_get_reg(0x29);
bbuf &= ~0x60;
bbuf |= 0x20;
/* Set pin 119 to WDTO# mode (= CR29, WDT0) */
w83697hf_set_reg(0x29, bbuf);
bbuf = w83697hf_get_reg(0xF3);
bbuf &= ~0x04;
w83697hf_set_reg(0xF3, bbuf); /* Count mode is seconds */
w83697hf_deselect_wdt();
}
static void wdt_ping(void)
{
spin_lock(&io_lock);
w83697hf_select_wdt();
w83697hf_write_timeout(timeout);
w83697hf_deselect_wdt();
spin_unlock(&io_lock);
}
static void wdt_enable(void)
{
spin_lock(&io_lock);
w83697hf_select_wdt();
w83697hf_write_timeout(timeout);
w83697hf_set_reg(0x30, 1); /* Enable timer */
w83697hf_deselect_wdt();
spin_unlock(&io_lock);
}
static void wdt_disable(void)
{
spin_lock(&io_lock);
w83697hf_select_wdt();
w83697hf_set_reg(0x30, 0); /* Disable timer */
w83697hf_write_timeout(0);
w83697hf_deselect_wdt();
spin_unlock(&io_lock);
}
static unsigned char wdt_running(void)
{
unsigned char t;
spin_lock(&io_lock);
w83697hf_select_wdt();
t = w83697hf_get_reg(0xF4); /* Read timer */
w83697hf_deselect_wdt();
spin_unlock(&io_lock);
return t;
}
static int wdt_set_heartbeat(int t)
{
if (t < 1 || t > 255)
return -EINVAL;
timeout = t;
return 0;
}
static ssize_t wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
expect_close = 42;
}
}
wdt_ping();
}
return count;
}
static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int new_timeout;
static const struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT
| WDIOF_MAGICCLOSE,
.firmware_version = 1,
.identity = "W83697HF WDT",
};
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user(argp, &ident, sizeof(ident)))
return -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
case WDIOC_SETOPTIONS:
{
int options, retval = -EINVAL;
if (get_user(options, p))
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
wdt_disable();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
wdt_enable();
retval = 0;
}
return retval;
}
case WDIOC_KEEPALIVE:
wdt_ping();
break;
case WDIOC_SETTIMEOUT:
if (get_user(new_timeout, p))
return -EFAULT;
if (wdt_set_heartbeat(new_timeout))
return -EINVAL;
wdt_ping();
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timeout, p);
default:
return -ENOTTY;
}
return 0;
}
static int wdt_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(0, &wdt_is_open))
return -EBUSY;
/*
* Activate
*/
wdt_enable();
return nonseekable_open(inode, file);
}
static int wdt_close(struct inode *inode, struct file *file)
{
if (expect_close == 42)
wdt_disable();
else {
printk(KERN_CRIT PFX
"Unexpected close, not stopping watchdog!\n");
wdt_ping();
}
expect_close = 0;
clear_bit(0, &wdt_is_open);
return 0;
}
/*
* Notifier for system down
*/
static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
if (code == SYS_DOWN || code == SYS_HALT)
wdt_disable(); /* Turn the WDT off */
return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
.open = wdt_open,
.release = wdt_close,
};
static struct miscdevice wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &wdt_fops,
};
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block wdt_notifier = {
.notifier_call = wdt_notify_sys,
};
static int w83697hf_check_wdt(void)
{
if (!request_region(wdt_io, 2, WATCHDOG_NAME)) {
printk(KERN_ERR PFX
"I/O address 0x%x already in use\n", wdt_io);
return -EIO;
}
printk(KERN_DEBUG PFX
"Looking for watchdog at address 0x%x\n", wdt_io);
w83697hf_unlock();
if (w83697hf_get_reg(0x20) == 0x60) {
printk(KERN_INFO PFX
"watchdog found at address 0x%x\n", wdt_io);
w83697hf_lock();
return 0;
}
/* Reprotect in case it was a compatible device */
w83697hf_lock();
printk(KERN_INFO PFX "watchdog not found at address 0x%x\n", wdt_io);
release_region(wdt_io, 2);
return -EIO;
}
static int w83697hf_ioports[] = { 0x2e, 0x4e, 0x00 };
static int __init wdt_init(void)
{
int ret, i, found = 0;
printk(KERN_INFO PFX "WDT driver for W83697HF/HG initializing\n");
if (wdt_io == 0) {
/* we will autodetect the W83697HF/HG watchdog */
for (i = 0; ((!found) && (w83697hf_ioports[i] != 0)); i++) {
wdt_io = w83697hf_ioports[i];
if (!w83697hf_check_wdt())
found++;
}
} else {
if (!w83697hf_check_wdt())
found++;
}
if (!found) {
printk(KERN_ERR PFX "No W83697HF/HG could be found\n");
ret = -EIO;
goto out;
}
w83697hf_init();
if (early_disable) {
if (wdt_running())
printk(KERN_WARNING PFX "Stopping previously enabled "
"watchdog until userland kicks in\n");
wdt_disable();
}
if (wdt_set_heartbeat(timeout)) {
wdt_set_heartbeat(WATCHDOG_TIMEOUT);
printk(KERN_INFO PFX
"timeout value must be 1 <= timeout <= 255, using %d\n",
WATCHDOG_TIMEOUT);
}
ret = register_reboot_notifier(&wdt_notifier);
if (ret != 0) {
printk(KERN_ERR PFX
"cannot register reboot notifier (err=%d)\n", ret);
goto unreg_regions;
}
ret = misc_register(&wdt_miscdev);
if (ret != 0) {
printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto unreg_reboot;
}
printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
timeout, nowayout);
out:
return ret;
unreg_reboot:
unregister_reboot_notifier(&wdt_notifier);
unreg_regions:
release_region(wdt_io, 2);
goto out;
}
static void __exit wdt_exit(void)
{
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
release_region(wdt_io, 2);
}
module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marcus Junker <junker@anduras.de>, "
"Samuel Tardieu <sam@rfc1149.net>");
MODULE_DESCRIPTION("w83697hf/hg WDT driver");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
julianschweizer/kernel_23.0.1.A.0.xxx | drivers/net/ethernet/aeroflex/greth.c | 4861 | 40788 | /*
* Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
*
* 2005-2010 (c) Aeroflex Gaisler AB
*
* This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
* available in the GRLIB VHDL IP core library.
*
* Full documentation of both cores can be found here:
* http://www.gaisler.com/products/grlib/grip.pdf
*
* The Gigabit version supports scatter/gather DMA, any alignment of
* buffers and checksum offloading.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Contributors: Kristoffer Glembo
* Daniel Hellstrom
* Marko Isomaki
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
#include "greth.h"
#define GRETH_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);
#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
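/* The ring indices wrap by masking, which assumes GRETH_TXBD_NUM and
 * GRETH_RXBD_NUM (from greth.h) are powers of two.  Purely as an illustration
 * of the masking (not necessarily the configured ring size): with 128
 * descriptors and a mask of 127, NEXT_TX(127) == 0 and SKIP_TX(126, 3) == 1.
 */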
static void greth_print_rx_packet(void *addr, int len)
{
print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
addr, len, true);
}
static void greth_print_tx_packet(struct sk_buff *skb)
{
int i;
int length;
if (skb_shinfo(skb)->nr_frags == 0)
length = skb->len;
else
length = skb_headlen(skb);
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, length, true);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].size, true);
}
}
static inline void greth_enable_tx(struct greth_private *greth)
{
wmb();
GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}
static inline void greth_disable_tx(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}
static inline void greth_enable_rx(struct greth_private *greth)
{
wmb();
GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}
static inline void greth_disable_rx(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}
static inline void greth_enable_irqs(struct greth_private *greth)
{
GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}
static inline void greth_disable_irqs(struct greth_private *greth)
{
GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
}
static inline void greth_write_bd(u32 *bd, u32 val)
{
__raw_writel(cpu_to_be32(val), bd);
}
static inline u32 greth_read_bd(u32 *bd)
{
return be32_to_cpu(__raw_readl(bd));
}
static void greth_clean_rings(struct greth_private *greth)
{
int i;
struct greth_bd *rx_bdp = greth->rx_bd_base;
struct greth_bd *tx_bdp = greth->tx_bd_base;
if (greth->gbit_mac) {
/* Free and unmap RX buffers */
for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
if (greth->rx_skbuff[i] != NULL) {
dev_kfree_skb(greth->rx_skbuff[i]);
dma_unmap_single(greth->dev,
greth_read_bd(&rx_bdp->addr),
MAX_FRAME_SIZE+NET_IP_ALIGN,
DMA_FROM_DEVICE);
}
}
/* TX buffers */
while (greth->tx_free < GRETH_TXBD_NUM) {
struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
int nr_frags = skb_shinfo(skb)->nr_frags;
tx_bdp = greth->tx_bd_base + greth->tx_last;
greth->tx_last = NEXT_TX(greth->tx_last);
dma_unmap_single(greth->dev,
greth_read_bd(&tx_bdp->addr),
skb_headlen(skb),
DMA_TO_DEVICE);
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tx_bdp = greth->tx_bd_base + greth->tx_last;
dma_unmap_page(greth->dev,
greth_read_bd(&tx_bdp->addr),
skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
}
greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
} else { /* 10/100 Mbps MAC */
for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
kfree(greth->rx_bufs[i]);
dma_unmap_single(greth->dev,
greth_read_bd(&rx_bdp->addr),
MAX_FRAME_SIZE,
DMA_FROM_DEVICE);
}
for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
kfree(greth->tx_bufs[i]);
dma_unmap_single(greth->dev,
greth_read_bd(&tx_bdp->addr),
MAX_FRAME_SIZE,
DMA_TO_DEVICE);
}
}
}
static int greth_init_rings(struct greth_private *greth)
{
struct sk_buff *skb;
struct greth_bd *rx_bd, *tx_bd;
u32 dma_addr;
int i;
rx_bd = greth->rx_bd_base;
tx_bd = greth->tx_bd_base;
/* Initialize descriptor rings and buffers */
if (greth->gbit_mac) {
for (i = 0; i < GRETH_RXBD_NUM; i++) {
skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
if (skb == NULL) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Error allocating DMA ring.\n");
goto cleanup;
}
skb_reserve(skb, NET_IP_ALIGN);
dma_addr = dma_map_single(greth->dev,
skb->data,
MAX_FRAME_SIZE+NET_IP_ALIGN,
DMA_FROM_DEVICE);
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
goto cleanup;
}
greth->rx_skbuff[i] = skb;
greth_write_bd(&rx_bd[i].addr, dma_addr);
greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
}
} else {
/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
for (i = 0; i < GRETH_RXBD_NUM; i++) {
greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
if (greth->rx_bufs[i] == NULL) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Error allocating DMA ring.\n");
goto cleanup;
}
dma_addr = dma_map_single(greth->dev,
greth->rx_bufs[i],
MAX_FRAME_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
goto cleanup;
}
greth_write_bd(&rx_bd[i].addr, dma_addr);
greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
}
for (i = 0; i < GRETH_TXBD_NUM; i++) {
greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
if (greth->tx_bufs[i] == NULL) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Error allocating DMA ring.\n");
goto cleanup;
}
dma_addr = dma_map_single(greth->dev,
greth->tx_bufs[i],
MAX_FRAME_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
goto cleanup;
}
greth_write_bd(&tx_bd[i].addr, dma_addr);
greth_write_bd(&tx_bd[i].stat, 0);
}
}
greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
/* Initialize pointers. */
greth->rx_cur = 0;
greth->tx_next = 0;
greth->tx_last = 0;
greth->tx_free = GRETH_TXBD_NUM;
/* Initialize descriptor base address */
GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
return 0;
cleanup:
greth_clean_rings(greth);
return -ENOMEM;
}
static int greth_open(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
int err;
err = greth_init_rings(greth);
if (err) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
return err;
}
err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
if (err) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
greth_clean_rings(greth);
return err;
}
if (netif_msg_ifup(greth))
dev_dbg(&dev->dev, " starting queue\n");
netif_start_queue(dev);
GRETH_REGSAVE(greth->regs->status, 0xFF);
napi_enable(&greth->napi);
greth_enable_irqs(greth);
greth_enable_tx(greth);
greth_enable_rx(greth);
return 0;
}
static int greth_close(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
napi_disable(&greth->napi);
greth_disable_irqs(greth);
greth_disable_tx(greth);
greth_disable_rx(greth);
netif_stop_queue(dev);
free_irq(greth->irq, (void *) dev);
greth_clean_rings(greth);
return 0;
}
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
int err = NETDEV_TX_OK;
u32 status, dma_addr, ctrl;
unsigned long flags;
/* Clean TX Ring */
greth_clean_tx(greth->netdev);
if (unlikely(greth->tx_free <= 0)) {
spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
ctrl = GRETH_REGLOAD(greth->regs->control);
/* Enable TX IRQ only if not already in poll() routine */
if (ctrl & GRETH_RXI)
GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_BUSY;
}
if (netif_msg_pktdata(greth))
greth_print_tx_packet(skb);
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
goto out;
}
bdp = greth->tx_bd_base + greth->tx_next;
dma_addr = greth_read_bd(&bdp->addr);
memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
status |= GRETH_BD_WR;
}
greth->tx_next = NEXT_TX(greth->tx_next);
greth->tx_free--;
/* Write descriptor control word and enable transmission */
greth_write_bd(&bdp->stat, status);
spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
out:
dev_kfree_skb(skb);
return err;
}
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct greth_bd *bdp;
u32 status = 0, dma_addr, ctrl;
int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
unsigned long flags;
nr_frags = skb_shinfo(skb)->nr_frags;
/* Clean TX Ring */
greth_clean_tx_gbit(dev);
if (greth->tx_free < nr_frags + 1) {
spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
ctrl = GRETH_REGLOAD(greth->regs->control);
/* Enable TX IRQ only if not already in poll() routine */
if (ctrl & GRETH_RXI)
GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
netif_stop_queue(dev);
spin_unlock_irqrestore(&greth->devlock, flags);
err = NETDEV_TX_BUSY;
goto out;
}
if (netif_msg_pktdata(greth))
greth_print_tx_packet(skb);
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
goto out;
}
/* Save skb pointer. */
greth->tx_skbuff[greth->tx_next] = skb;
/* Linear buf */
if (nr_frags != 0)
status = GRETH_TXBD_MORE;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
status |= skb_headlen(skb) & GRETH_BD_LEN;
if (greth->tx_next == GRETH_TXBD_NUM_MASK)
status |= GRETH_BD_WR;
bdp = greth->tx_bd_base + greth->tx_next;
greth_write_bd(&bdp->stat, status);
dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
goto map_error;
greth_write_bd(&bdp->addr, dma_addr);
curr_tx = NEXT_TX(greth->tx_next);
/* Frags */
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
greth->tx_skbuff[curr_tx] = NULL;
bdp = greth->tx_bd_base + curr_tx;
status = GRETH_BD_EN;
if (skb->ip_summed == CHECKSUM_PARTIAL)
status |= GRETH_TXBD_CSALL;
status |= skb_frag_size(frag) & GRETH_BD_LEN;
/* Wrap around descriptor ring */
if (curr_tx == GRETH_TXBD_NUM_MASK)
status |= GRETH_BD_WR;
/* More fragments left */
if (i < nr_frags - 1)
status |= GRETH_TXBD_MORE;
else
status |= GRETH_BD_IE; /* enable IRQ on last fragment */
greth_write_bd(&bdp->stat, status);
dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
goto frag_map_error;
greth_write_bd(&bdp->addr, dma_addr);
curr_tx = NEXT_TX(curr_tx);
}
wmb();
/* Enable the descriptor chain by enabling the first descriptor */
bdp = greth->tx_bd_base + greth->tx_next;
greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
greth->tx_next = curr_tx;
greth->tx_free -= nr_frags + 1;
wmb();
spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
greth_enable_tx(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
return NETDEV_TX_OK;
frag_map_error:
/* Unmap SKB mappings that succeeded and disable descriptor */
for (i = 0; SKIP_TX(greth->tx_next, i) != curr_tx; i++) {
/* index with SKIP_TX so the cleanup walk wraps the same way curr_tx did */
bdp = greth->tx_bd_base + SKIP_TX(greth->tx_next, i);
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
DMA_TO_DEVICE);
greth_write_bd(&bdp->stat, 0);
}
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
dev_kfree_skb(skb);
out:
return err;
}
static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct greth_private *greth;
u32 status, ctrl;
irqreturn_t retval = IRQ_NONE;
greth = netdev_priv(dev);
spin_lock(&greth->devlock);
/* Get the interrupt events that caused us to be here. */
status = GRETH_REGLOAD(greth->regs->status);
/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
* set regardless of whether IRQ is enabled or not. Especially
* important when shared IRQ.
*/
ctrl = GRETH_REGLOAD(greth->regs->control);
/* Handle rx and tx interrupts through poll */
if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
retval = IRQ_HANDLED;
/* Disable interrupts and schedule poll() */
greth_disable_irqs(greth);
napi_schedule(&greth->napi);
}
mmiowb();
spin_unlock(&greth->devlock);
return retval;
}
static void greth_clean_tx(struct net_device *dev)
{
struct greth_private *greth;
struct greth_bd *bdp;
u32 stat;
greth = netdev_priv(dev);
while (1) {
bdp = greth->tx_bd_base + greth->tx_last;
GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
mb();
stat = greth_read_bd(&bdp->stat);
if (unlikely(stat & GRETH_BD_EN))
break;
if (greth->tx_free == GRETH_TXBD_NUM)
break;
/* Check status for errors */
if (unlikely(stat & GRETH_TXBD_STATUS)) {
dev->stats.tx_errors++;
if (stat & GRETH_TXBD_ERR_AL)
dev->stats.tx_aborted_errors++;
if (stat & GRETH_TXBD_ERR_UE)
dev->stats.tx_fifo_errors++;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
greth->tx_last = NEXT_TX(greth->tx_last);
greth->tx_free++;
}
if (greth->tx_free > 0) {
netif_wake_queue(dev);
}
}
static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
/* Check status for errors */
if (unlikely(stat & GRETH_TXBD_STATUS)) {
dev->stats.tx_errors++;
if (stat & GRETH_TXBD_ERR_AL)
dev->stats.tx_aborted_errors++;
if (stat & GRETH_TXBD_ERR_UE)
dev->stats.tx_fifo_errors++;
if (stat & GRETH_TXBD_ERR_LC)
dev->stats.tx_aborted_errors++;
}
dev->stats.tx_packets++;
}
static void greth_clean_tx_gbit(struct net_device *dev)
{
struct greth_private *greth;
struct greth_bd *bdp, *bdp_last_frag;
struct sk_buff *skb;
u32 stat;
int nr_frags, i;
greth = netdev_priv(dev);
while (greth->tx_free < GRETH_TXBD_NUM) {
skb = greth->tx_skbuff[greth->tx_last];
nr_frags = skb_shinfo(skb)->nr_frags;
/* We only clean fully completed SKBs */
bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
mb();
stat = greth_read_bd(&bdp_last_frag->stat);
if (stat & GRETH_BD_EN)
break;
greth->tx_skbuff[greth->tx_last] = NULL;
greth_update_tx_stats(dev, stat);
dev->stats.tx_bytes += skb->len;
bdp = greth->tx_bd_base + greth->tx_last;
greth->tx_last = NEXT_TX(greth->tx_last);
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
skb_headlen(skb),
DMA_TO_DEVICE);
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
bdp = greth->tx_bd_base + greth->tx_last;
dma_unmap_page(greth->dev,
greth_read_bd(&bdp->addr),
skb_frag_size(frag),
DMA_TO_DEVICE);
greth->tx_last = NEXT_TX(greth->tx_last);
}
greth->tx_free += nr_frags+1;
dev_kfree_skb(skb);
}
if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
netif_wake_queue(dev);
}
static int greth_rx(struct net_device *dev, int limit)
{
struct greth_private *greth;
struct greth_bd *bdp;
struct sk_buff *skb;
int pkt_len;
int bad, count;
u32 status, dma_addr;
unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
mb();
status = greth_read_bd(&bdp->stat);
if (unlikely(status & GRETH_BD_EN)) {
break;
}
dma_addr = greth_read_bd(&bdp->addr);
bad = 0;
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
dev->stats.rx_length_errors++;
bad = 1;
}
if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
dev->stats.rx_frame_errors++;
bad = 1;
}
if (status & GRETH_RXBD_ERR_CRC) {
dev->stats.rx_crc_errors++;
bad = 1;
}
}
if (unlikely(bad)) {
dev->stats.rx_errors++;
} else {
pkt_len = status & GRETH_BD_LEN;
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (unlikely(skb == NULL)) {
if (net_ratelimit())
dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
dma_sync_single_for_cpu(greth->dev,
dma_addr,
pkt_len,
DMA_FROM_DEVICE);
if (netif_msg_pktdata(greth))
greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
netif_receive_skb(skb);
}
}
status = GRETH_BD_EN | GRETH_BD_IE;
if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
status |= GRETH_BD_WR;
}
wmb();
greth_write_bd(&bdp->stat, status);
dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
greth_enable_rx(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
return count;
}
static inline int hw_checksummed(u32 status)
{
if (status & GRETH_RXBD_IP_FRAG)
return 0;
if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
return 0;
if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
return 0;
if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
return 0;
return 1;
}
static int greth_rx_gbit(struct net_device *dev, int limit)
{
struct greth_private *greth;
struct greth_bd *bdp;
struct sk_buff *skb, *newskb;
int pkt_len;
int bad, count = 0;
u32 status, dma_addr;
unsigned long flags;
greth = netdev_priv(dev);
for (count = 0; count < limit; ++count) {
bdp = greth->rx_bd_base + greth->rx_cur;
skb = greth->rx_skbuff[greth->rx_cur];
GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
mb();
status = greth_read_bd(&bdp->stat);
bad = 0;
if (status & GRETH_BD_EN)
break;
/* Check status for errors. */
if (unlikely(status & GRETH_RXBD_STATUS)) {
if (status & GRETH_RXBD_ERR_FT) {
dev->stats.rx_length_errors++;
bad = 1;
} else if (status &
(GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
dev->stats.rx_frame_errors++;
bad = 1;
} else if (status & GRETH_RXBD_ERR_CRC) {
dev->stats.rx_crc_errors++;
bad = 1;
}
}
/* Allocate new skb to replace current, not needed if the
* current skb can be reused */
if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
skb_reserve(newskb, NET_IP_ALIGN);
dma_addr = dma_map_single(greth->dev,
newskb->data,
MAX_FRAME_SIZE + NET_IP_ALIGN,
DMA_FROM_DEVICE);
if (!dma_mapping_error(greth->dev, dma_addr)) {
/* Process the incoming frame. */
pkt_len = status & GRETH_BD_LEN;
dma_unmap_single(greth->dev,
greth_read_bd(&bdp->addr),
MAX_FRAME_SIZE + NET_IP_ALIGN,
DMA_FROM_DEVICE);
if (netif_msg_pktdata(greth))
greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
skb_put(skb, pkt_len);
if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
netif_receive_skb(skb);
greth->rx_skbuff[greth->rx_cur] = newskb;
greth_write_bd(&bdp->addr, dma_addr);
} else {
if (net_ratelimit())
dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
dev_kfree_skb(newskb);
/* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
} else if (bad) {
/* Bad Frame transfer, the skb is reused */
dev->stats.rx_dropped++;
} else {
/* Failed to allocate a new skb. This is rather stupid,
 * but the current "filled" skb is reused, as if the
 * transfer had failed. One could argue that RX descriptor
 * table handling should be divided into cleaning and
 * filling, as in the TX part of the driver.
 */
if (net_ratelimit())
dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
/* reusing current skb, so it is a drop */
dev->stats.rx_dropped++;
}
status = GRETH_BD_EN | GRETH_BD_IE;
if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
status |= GRETH_BD_WR;
}
wmb();
greth_write_bd(&bdp->stat, status);
spin_lock_irqsave(&greth->devlock, flags);
greth_enable_rx(greth);
spin_unlock_irqrestore(&greth->devlock, flags);
greth->rx_cur = NEXT_RX(greth->rx_cur);
}
return count;
}
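/*
 * NAPI poll: clean the TX ring whenever the queue is stopped, then run the
 * RX handler matching the MAC type up to the remaining budget.  Before
 * completing, interrupts are re-enabled (RX always, TX too while the queue
 * is stopped) and the status register is re-checked under devlock; if new
 * work slipped in, the poll restarts instead of calling __napi_complete().
 */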
static int greth_poll(struct napi_struct *napi, int budget)
{
struct greth_private *greth;
int work_done = 0;
unsigned long flags;
u32 mask, ctrl;
greth = container_of(napi, struct greth_private, napi);
restart_txrx_poll:
if (netif_queue_stopped(greth->netdev)) {
if (greth->gbit_mac)
greth_clean_tx_gbit(greth->netdev);
else
greth_clean_tx(greth->netdev);
}
if (greth->gbit_mac) {
work_done += greth_rx_gbit(greth->netdev, budget - work_done);
} else {
work_done += greth_rx(greth->netdev, budget - work_done);
}
if (work_done < budget) {
spin_lock_irqsave(&greth->devlock, flags);
ctrl = GRETH_REGLOAD(greth->regs->control);
if (netif_queue_stopped(greth->netdev)) {
GRETH_REGSAVE(greth->regs->control,
ctrl | GRETH_TXI | GRETH_RXI);
mask = GRETH_INT_RX | GRETH_INT_RE |
GRETH_INT_TX | GRETH_INT_TE;
} else {
GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
mask = GRETH_INT_RX | GRETH_INT_RE;
}
if (GRETH_REGLOAD(greth->regs->status) & mask) {
GRETH_REGSAVE(greth->regs->control, ctrl);
spin_unlock_irqrestore(&greth->devlock, flags);
goto restart_txrx_poll;
} else {
__napi_complete(napi);
spin_unlock_irqrestore(&greth->devlock, flags);
}
}
return work_done;
}
static int greth_set_mac_add(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct greth_private *greth;
struct greth_regs *regs;
greth = netdev_priv(dev);
regs = (struct greth_regs *) greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
return 0;
}
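/*
 * Multicast filtering uses a 64-bit hash: the index is the low six bits of
 * the Ethernet CRC of the address, and the corresponding bit is set in the
 * hash_msb/hash_lsb register pair (upper/lower 32 bits respectively).
 */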
static u32 greth_hash_get_index(__u8 *addr)
{
return (ether_crc(6, addr)) & 0x3F;
}
static void greth_set_hash_filter(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
netdev_for_each_mc_addr(ha, dev) {
bitnr = greth_hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}
static void greth_set_multicast_list(struct net_device *dev)
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
struct greth_regs *regs = (struct greth_regs *) greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
cfg |= GRETH_CTRL_PR;
else
cfg &= ~GRETH_CTRL_PR;
if (greth->multicast) {
if (dev->flags & IFF_ALLMULTI) {
GRETH_REGSAVE(regs->hash_msb, -1);
GRETH_REGSAVE(regs->hash_lsb, -1);
cfg |= GRETH_CTRL_MCEN;
GRETH_REGSAVE(regs->control, cfg);
return;
}
if (netdev_mc_empty(dev)) {
cfg &= ~GRETH_CTRL_MCEN;
GRETH_REGSAVE(regs->control, cfg);
return;
}
/* Setup multicast filter */
greth_set_hash_filter(dev);
cfg |= GRETH_CTRL_MCEN;
}
GRETH_REGSAVE(regs->control, cfg);
}
static u32 greth_get_msglevel(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
return greth->msg_enable;
}
static void greth_set_msglevel(struct net_device *dev, u32 value)
{
struct greth_private *greth = netdev_priv(dev);
greth->msg_enable = value;
}
static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phy = greth->phy;
if (!phy)
return -ENODEV;
return phy_ethtool_gset(phy, cmd);
}
static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phy = greth->phy;
if (!phy)
return -ENODEV;
return phy_ethtool_sset(phy, cmd);
}
static int greth_get_regs_len(struct net_device *dev)
{
return sizeof(struct greth_regs);
}
static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct greth_private *greth = netdev_priv(dev);
strncpy(info->driver, dev_driver_string(greth->dev), 32);
strncpy(info->version, "revision: 1.0", 32);
strncpy(info->bus_info, greth->dev->bus->name, 32);
strncpy(info->fw_version, "N/A", 32);
info->eedump_len = 0;
info->regdump_len = sizeof(struct greth_regs);
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
int i;
struct greth_private *greth = netdev_priv(dev);
u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
u32 *buff = p;
for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
buff[i] = greth_read_bd(&greth_regs[i]);
}
static const struct ethtool_ops greth_ethtool_ops = {
.get_msglevel = greth_get_msglevel,
.set_msglevel = greth_set_msglevel,
.get_settings = greth_get_settings,
.set_settings = greth_set_settings,
.get_drvinfo = greth_get_drvinfo,
.get_regs_len = greth_get_regs_len,
.get_regs = greth_get_regs,
.get_link = ethtool_op_get_link,
};
static struct net_device_ops greth_netdev_ops = {
.ndo_open = greth_open,
.ndo_stop = greth_close,
.ndo_start_xmit = greth_start_xmit,
.ndo_set_mac_address = greth_set_mac_add,
.ndo_validate_addr = eth_validate_addr,
};
static inline int wait_for_mdio(struct greth_private *greth)
{
unsigned long timeout = jiffies + 4*HZ/100;
while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
if (time_after(jiffies, timeout))
return 0;
}
return 1;
}
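/*
 * MDIO accessors.  As used here, the mdio register packs the 16-bit data in
 * bits 31:16, the PHY address in bits 15:11 and the register number in bits
 * 10:6; bit 1 starts a read and bit 0 a write, with GRETH_MII_BUSY and
 * GRETH_MII_NVALID reporting completion and read-data validity.
 */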
static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct greth_private *greth = bus->priv;
int data;
if (!wait_for_mdio(greth))
return -EBUSY;
GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
if (!wait_for_mdio(greth))
return -EBUSY;
if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
return data;
} else {
return -1;
}
}
static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct greth_private *greth = bus->priv;
if (!wait_for_mdio(greth))
return -EBUSY;
GRETH_REGSAVE(greth->regs->mdio,
((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
if (!wait_for_mdio(greth))
return -EBUSY;
return 0;
}
static int greth_mdio_reset(struct mii_bus *bus)
{
return 0;
}
static void greth_link_change(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phydev = greth->phy;
unsigned long flags;
int status_change = 0;
u32 ctrl;
spin_lock_irqsave(&greth->devlock, flags);
if (phydev->link) {
if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
ctrl = GRETH_REGLOAD(greth->regs->control) &
~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
if (phydev->duplex)
ctrl |= GRETH_CTRL_FD;
if (phydev->speed == SPEED_100)
ctrl |= GRETH_CTRL_SP;
else if (phydev->speed == SPEED_1000)
ctrl |= GRETH_CTRL_GB;
GRETH_REGSAVE(greth->regs->control, ctrl);
greth->speed = phydev->speed;
greth->duplex = phydev->duplex;
status_change = 1;
}
}
if (phydev->link != greth->link) {
if (!phydev->link) {
greth->speed = 0;
greth->duplex = -1;
}
greth->link = phydev->link;
status_change = 1;
}
spin_unlock_irqrestore(&greth->devlock, flags);
if (status_change) {
if (phydev->link)
pr_debug("%s: link up (%d/%s)\n",
dev->name, phydev->speed,
DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
else
pr_debug("%s: link down\n", dev->name);
}
}
static int greth_mdio_probe(struct net_device *dev)
{
struct greth_private *greth = netdev_priv(dev);
struct phy_device *phy = NULL;
int ret;
/* Find the first PHY */
phy = phy_find_first(greth->mdio);
if (!phy) {
if (netif_msg_probe(greth))
dev_err(&dev->dev, "no PHY found\n");
return -ENXIO;
}
ret = phy_connect_direct(dev, phy, &greth_link_change,
0, greth->gbit_mac ?
PHY_INTERFACE_MODE_GMII :
PHY_INTERFACE_MODE_MII);
if (ret) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "could not attach to PHY\n");
return ret;
}
if (greth->gbit_mac)
phy->supported &= PHY_GBIT_FEATURES;
else
phy->supported &= PHY_BASIC_FEATURES;
phy->advertising = phy->supported;
greth->link = 0;
greth->speed = 0;
greth->duplex = -1;
greth->phy = phy;
return 0;
}
static inline int phy_aneg_done(struct phy_device *phydev)
{
int retval;
retval = phy_read(phydev, MII_BMSR);
return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}
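/*
 * Register an MDIO bus backed by the accessors above, attach the first PHY
 * found and start it.  When the EDCL debug link is in use (greth_edcl),
 * autonegotiation is kicked off immediately and busy-waited for up to six
 * seconds so the link is usable before the interface is opened.
 */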
static int greth_mdio_init(struct greth_private *greth)
{
int ret, phy;
unsigned long timeout;
greth->mdio = mdiobus_alloc();
if (!greth->mdio) {
return -ENOMEM;
}
greth->mdio->name = "greth-mdio";
snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
greth->mdio->read = greth_mdio_read;
greth->mdio->write = greth_mdio_write;
greth->mdio->reset = greth_mdio_reset;
greth->mdio->priv = greth;
greth->mdio->irq = greth->mdio_irqs;
for (phy = 0; phy < PHY_MAX_ADDR; phy++)
greth->mdio->irq[phy] = PHY_POLL;
ret = mdiobus_register(greth->mdio);
if (ret) {
goto error;
}
ret = greth_mdio_probe(greth->netdev);
if (ret) {
if (netif_msg_probe(greth))
dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
goto unreg_mdio;
}
phy_start(greth->phy);
/* If Ethernet debug link is used make autoneg happen right away */
if (greth->edcl && greth_edcl == 1) {
phy_start_aneg(greth->phy);
timeout = jiffies + 6*HZ;
while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
}
genphy_read_status(greth->phy);
greth_link_change(greth->netdev);
}
return 0;
unreg_mdio:
mdiobus_unregister(greth->mdio);
error:
mdiobus_free(greth->mdio);
return ret;
}
/* Initialize the GRETH MAC */
static int __devinit greth_of_probe(struct platform_device *ofdev)
{
struct net_device *dev;
struct greth_private *greth;
struct greth_regs *regs;
int i;
int err;
int tmp;
unsigned long timeout;
dev = alloc_etherdev(sizeof(struct greth_private));
if (dev == NULL)
return -ENOMEM;
greth = netdev_priv(dev);
greth->netdev = dev;
greth->dev = &ofdev->dev;
if (greth_debug > 0)
greth->msg_enable = greth_debug;
else
greth->msg_enable = GRETH_DEF_MSG_ENABLE;
spin_lock_init(&greth->devlock);
greth->regs = of_ioremap(&ofdev->resource[0], 0,
resource_size(&ofdev->resource[0]),
"grlib-greth regs");
if (greth->regs == NULL) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "ioremap failure.\n");
err = -EIO;
goto error1;
}
regs = (struct greth_regs *) greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
SET_NETDEV_DEV(dev, greth->dev);
if (netif_msg_probe(greth))
dev_dbg(greth->dev, "resetting controller.\n");
/* Reset the controller. */
GRETH_REGSAVE(regs->control, GRETH_RESET);
/* Wait for MAC to reset itself */
timeout = jiffies + HZ/100;
while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
if (time_after(jiffies, timeout)) {
err = -EIO;
if (netif_msg_probe(greth))
dev_err(greth->dev, "timeout when waiting for reset.\n");
goto error2;
}
}
/* Get default PHY address */
greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
/* Check if we have GBIT capable MAC */
tmp = GRETH_REGLOAD(regs->control);
greth->gbit_mac = (tmp >> 27) & 1;
/* Check for multicast capability */
greth->multicast = (tmp >> 25) & 1;
greth->edcl = (tmp >> 31) & 1;
/* If we have EDCL we disable the EDCL speed-duplex FSM so
* it doesn't interfere with the software */
if (greth->edcl != 0)
GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
/* Check if MAC can handle MDIO interrupts */
greth->mdio_int_en = (tmp >> 26) & 1;
err = greth_mdio_init(greth);
if (err) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "failed to register MDIO bus\n");
goto error2;
}
/* Allocate TX descriptor ring in coherent memory */
greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1024,
&greth->tx_bd_base_phys,
GFP_KERNEL);
if (!greth->tx_bd_base) {
if (netif_msg_probe(greth))
dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error3;
}
memset(greth->tx_bd_base, 0, 1024);
/* Allocate RX descriptor ring in coherent memory */
greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
1024,
&greth->rx_bd_base_phys,
GFP_KERNEL);
if (!greth->rx_bd_base) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error4;
}
memset(greth->rx_bd_base, 0, 1024);
/* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) {
if (macaddr[i] != 0)
break;
}
if (i == 6) {
const unsigned char *addr;
int len;
addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
&len);
if (addr != NULL && len == 6) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
#ifdef CONFIG_SPARC
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
}
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = macaddr[i];
macaddr[5]++;
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "no valid ethernet address, aborting.\n");
err = -EINVAL;
goto error5;
}
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
/* Clear all pending interrupts except PHY irq */
GRETH_REGSAVE(regs->status, 0xFF);
if (greth->gbit_mac) {
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
dev->features = dev->hw_features | NETIF_F_HIGHDMA;
greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
}
if (greth->multicast) {
greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
dev->flags |= IFF_MULTICAST;
} else {
dev->flags &= ~IFF_MULTICAST;
}
dev->netdev_ops = &greth_netdev_ops;
dev->ethtool_ops = &greth_ethtool_ops;
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "netdevice registration failed.\n");
goto error5;
}
/* setup NAPI */
netif_napi_add(dev, &greth->napi, greth_poll, 64);
return 0;
error5:
dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
mdiobus_unregister(greth->mdio);
error2:
of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
free_netdev(dev);
return err;
}
static int __devexit greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
struct greth_private *greth = netdev_priv(ndev);
/* Free descriptor areas */
dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
dev_set_drvdata(&of_dev->dev, NULL);
if (greth->phy)
phy_stop(greth->phy);
mdiobus_unregister(greth->mdio);
unregister_netdev(ndev);
free_netdev(ndev);
of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
return 0;
}
static struct of_device_id greth_of_match[] = {
{
.name = "GAISLER_ETHMAC",
},
{
.name = "01_01d",
},
{},
};
MODULE_DEVICE_TABLE(of, greth_of_match);
static struct platform_driver greth_of_driver = {
.driver = {
.name = "grlib-greth",
.owner = THIS_MODULE,
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
.remove = __devexit_p(greth_of_remove),
};
module_platform_driver(greth_of_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
savoca/furnace-g3 | net/sunrpc/bc_svc.c | 5629 | 2024 | /******************************************************************************
(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.
NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*
* The NFSv4.1 callback service helper routines.
* They implement the transport level processing required to send the
* reply over an existing open connection previously established by the client.
*/
#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCDSP
/* Empty callback ops */
static const struct rpc_call_ops nfs41_callback_ops = {
};
/*
* Send the callback reply
*/
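/*
 * bc_send() runs the backchannel reply as a one-shot RPC task via
 * rpc_run_bc_task() (with empty callback ops, since no completion work is
 * needed) and returns the task status once it has finished.
 */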
int bc_send(struct rpc_rqst *req)
{
struct rpc_task *task;
int ret;
dprintk("RPC: bc_send req= %p\n", req);
task = rpc_run_bc_task(req, &nfs41_callback_ops);
if (IS_ERR(task))
ret = PTR_ERR(task);
else {
BUG_ON(atomic_read(&task->tk_count) != 1);
ret = task->tk_status;
rpc_put_task(task);
}
dprintk("RPC: bc_send ret= %d\n", ret);
return ret;
}
| gpl-2.0 |
khusika/samsung-kernel-msm7x30 | drivers/mca/mca-device.c | 9981 | 6771 | /* -*- mode: c; c-basic-offset: 8 -*- */
/*
* MCA device support functions
*
* These functions support the ongoing device access API.
*
* (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mca.h>
#include <linux/string.h>
/**
* mca_device_read_stored_pos - read POS register from stored data
* @mca_dev: device to read from
* @reg: register to read from
*
* Fetch a POS value that was stored at boot time by the kernel
* when it scanned the MCA space. The register value is returned.
* Missing or invalid registers report 0.
*/
unsigned char mca_device_read_stored_pos(struct mca_device *mca_dev, int reg)
{
if(reg < 0 || reg >= 8)
return 0;
return mca_dev->pos[reg];
}
EXPORT_SYMBOL(mca_device_read_stored_pos);
/**
* mca_device_read_pos - read POS register from card
* @mca_dev: device to read from
* @reg: register to read from
*
* Fetch a POS value directly from the hardware to obtain the
* current value. This is much slower than
* mca_device_read_stored_pos and may not be invoked from
* interrupt context. It handles the deep magic required for
* onboard devices transparently.
*/
unsigned char mca_device_read_pos(struct mca_device *mca_dev, int reg)
{
struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
return mca_bus->f.mca_read_pos(mca_dev, reg);
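/* Not reached: the cached POS value below is never returned. */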
return mca_dev->pos[reg];
}
EXPORT_SYMBOL(mca_device_read_pos);
/**
* mca_device_write_pos - write POS register to card
* @mca_dev: device to write pos register to
* @reg: register to write to
* @byte: byte to write to the POS registers
*
* Store a POS value directly to the hardware. You should not
* normally need to use this function and should have a very good
* knowledge of MCA bus before you do so. Doing this wrongly can
* damage the hardware.
*
* This function may not be used from interrupt context.
*
*/
void mca_device_write_pos(struct mca_device *mca_dev, int reg,
unsigned char byte)
{
struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
mca_bus->f.mca_write_pos(mca_dev, reg, byte);
}
EXPORT_SYMBOL(mca_device_write_pos);
/**
* mca_device_transform_irq - transform the ADF obtained IRQ
* @mca_device: device whose irq needs transforming
* @irq: input irq from ADF
*
* MCA Adapter Definition Files (ADF) contain irq, ioport, memory
* etc. definitions. In systems with more than one bus, these need
* to be transformed through bus mapping functions to get the real
* system global quantities.
*
* This function transforms the interrupt number and returns the
* transformed system global interrupt
*/
int mca_device_transform_irq(struct mca_device *mca_dev, int irq)
{
struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
return mca_bus->f.mca_transform_irq(mca_dev, irq);
}
EXPORT_SYMBOL(mca_device_transform_irq);
/**
* mca_device_transform_ioport - transform the ADF obtained I/O port
* @mca_device: device whose port needs transforming
* @ioport: input I/O port from ADF
*
* MCA Adapter Definition Files (ADF) contain irq, ioport, memory
* etc. definitions. In systems with more than one bus, these need
* to be transformed through bus mapping functions to get the real
* system global quantities.
*
* This function transforms the I/O port number and returns the
* transformed system global port number.
*
* This transformation can be assumed to be linear for port ranges.
*/
int mca_device_transform_ioport(struct mca_device *mca_dev, int port)
{
struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
return mca_bus->f.mca_transform_ioport(mca_dev, port);
}
EXPORT_SYMBOL(mca_device_transform_ioport);
/**
* mca_device_transform_memory - transform the ADF obtained memory
* @mca_device: device whose memory region needs transforming
* @mem: memory region start from ADF
*
* MCA Adapter Definition Files (ADF) contain irq, ioport, memory
* etc. definitions. In systems with more than one bus, these need
* to be transformed through bus mapping functions to get the real
* system global quantities.
*
* This function transforms the memory region start and returns the
* transformed system global memory region (physical).
*
* This transformation can be assumed to be linear for region ranges.
*/
void *mca_device_transform_memory(struct mca_device *mca_dev, void *mem)
{
struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent);
return mca_bus->f.mca_transform_memory(mca_dev, mem);
}
EXPORT_SYMBOL(mca_device_transform_memory);
/**
* mca_device_claimed - check if claimed by driver
* @mca_dev: device to check
*
* Returns 1 if the slot has been claimed by a driver
*/
int mca_device_claimed(struct mca_device *mca_dev)
{
return mca_dev->driver_loaded;
}
EXPORT_SYMBOL(mca_device_claimed);
/**
* mca_device_set_claim - set the claim value of the driver
* @mca_dev: device to set value for
* @val: claim value to set (1 claimed, 0 unclaimed)
*/
void mca_device_set_claim(struct mca_device *mca_dev, int val)
{
mca_dev->driver_loaded = val;
}
EXPORT_SYMBOL(mca_device_set_claim);
/**
* mca_device_status - get the status of the device
* @mca_device: device to get
*
* returns an enumeration of the device status:
*
* MCA_ADAPTER_NORMAL adapter is OK.
* MCA_ADAPTER_NONE no adapter at device (should never happen).
* MCA_ADAPTER_DISABLED adapter is disabled.
* MCA_ADAPTER_ERROR adapter cannot be initialised.
*/
enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev)
{
return mca_dev->status;
}
EXPORT_SYMBOL(mca_device_status);
/**
* mca_device_set_name - set the name of the device
* @mca_device: device to set the name of
* @name: name to set
*/
void mca_device_set_name(struct mca_device *mca_dev, const char *name)
{
if(!mca_dev)
return;
strlcpy(mca_dev->name, name, sizeof(mca_dev->name));
}
EXPORT_SYMBOL(mca_device_set_name);
| gpl-2.0 |
phenomx4/android_kernel_lge_fx3 | drivers/net/sb1000.c | 11773 | 32210 | /* sb1000.c: A General Instruments SB1000 driver for linux. */
/*
Written 1998 by Franco Venturi.
Copyright 1998 by Franco Venturi.
Copyright 1994,1995 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
This driver is for the General Instruments SB1000 (internal SURFboard)
The author may be reached as fventuri@mediaone.net
This program is free software; you can redistribute it
and/or modify it under the terms of the GNU General
Public License as published by the Free Software
Foundation; either version 2 of the License, or (at
your option) any later version.
Changes:
981115 Steven Hirsch <shirsch@adelphia.net>
Linus changed the timer interface. Should work on all recent
development kernels.
980608 Steven Hirsch <shirsch@adelphia.net>
Small changes to make it work with 2.1.x kernels. Hopefully,
nothing major will change before official release of Linux 2.2.
Merged with 2.2 - Alan Cox
*/
static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */
#include <linux/etherdevice.h>
#include <linux/pnp.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#ifdef SB1000_DEBUG
static int sb1000_debug = SB1000_DEBUG;
#else
static const int sb1000_debug = 1;
#endif
static const int SB1000_IO_EXTENT = 8;
/* SB1000 Maximum Receive Unit */
static const int SB1000_MRU = 1500; /* octects */
#define NPIDS 4
struct sb1000_private {
struct sk_buff *rx_skb[NPIDS];
short rx_dlen[NPIDS];
unsigned int rx_frames;
short rx_error_count;
short rx_error_dpc_count;
unsigned char rx_session_id[NPIDS];
unsigned char rx_frame_id[NPIDS];
unsigned char rx_pkt_type[NPIDS];
};
/* prototypes for Linux interface */
extern int sb1000_probe(struct net_device *dev);
static int sb1000_open(struct net_device *dev);
static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
static int sb1000_close(struct net_device *dev);
/* SB1000 hardware routines to be used during open/configuration phases */
static int card_wait_for_busy_clear(const int ioaddr[],
const char* name);
static int card_wait_for_ready(const int ioaddr[], const char* name,
unsigned char in[]);
static int card_send_command(const int ioaddr[], const char* name,
const unsigned char out[], unsigned char in[]);
/* SB1000 hardware routines to be used during frame rx interrupt */
static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
static int sb1000_wait_for_ready_clear(const int ioaddr[],
const char* name);
static void sb1000_send_command(const int ioaddr[], const char* name,
const unsigned char out[]);
static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
static void sb1000_issue_read_command(const int ioaddr[],
const char* name);
/* SB1000 commands for open/configuration */
static int sb1000_reset(const int ioaddr[], const char* name);
static int sb1000_check_CRC(const int ioaddr[], const char* name);
static inline int sb1000_start_get_set_command(const int ioaddr[],
const char* name);
static int sb1000_end_get_set_command(const int ioaddr[],
const char* name);
static int sb1000_activate(const int ioaddr[], const char* name);
static int sb1000_get_firmware_version(const int ioaddr[],
const char* name, unsigned char version[], int do_end);
static int sb1000_get_frequency(const int ioaddr[], const char* name,
int* frequency);
static int sb1000_set_frequency(const int ioaddr[], const char* name,
int frequency);
static int sb1000_get_PIDs(const int ioaddr[], const char* name,
short PID[]);
static int sb1000_set_PIDs(const int ioaddr[], const char* name,
const short PID[]);
/* SB1000 commands for frame rx interrupt */
static int sb1000_rx(struct net_device *dev);
static void sb1000_error_dpc(struct net_device *dev);
static const struct pnp_device_id sb1000_pnp_ids[] = {
{ "GIC1000", 0 },
{ "", 0 }
};
MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
static const struct net_device_ops sb1000_netdev_ops = {
.ndo_open = sb1000_open,
.ndo_start_xmit = sb1000_start_xmit,
.ndo_do_ioctl = sb1000_dev_ioctl,
.ndo_stop = sb1000_close,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int
sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
{
struct net_device *dev;
unsigned short ioaddr[2], irq;
unsigned int serial_number;
int error = -ENODEV;
if (pnp_device_attach(pdev) < 0)
return -ENODEV;
if (pnp_activate_dev(pdev) < 0)
goto out_detach;
if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1))
goto out_disable;
if (!pnp_irq_valid(pdev, 0))
goto out_disable;
serial_number = pdev->card->serial;
ioaddr[0] = pnp_port_start(pdev, 0);
ioaddr[1] = pnp_port_start(pdev, 1);
irq = pnp_irq(pdev, 0);
if (!request_region(ioaddr[0], 16, "sb1000"))
goto out_disable;
if (!request_region(ioaddr[1], 16, "sb1000"))
goto out_release_region0;
dev = alloc_etherdev(sizeof(struct sb1000_private));
if (!dev) {
error = -ENOMEM;
goto out_release_regions;
}
dev->base_addr = ioaddr[0];
/* mem_start holds the second I/O address */
dev->mem_start = ioaddr[1];
dev->irq = irq;
if (sb1000_debug > 0)
printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), "
"S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr,
dev->mem_start, serial_number, dev->irq);
/*
* The SB1000 is an rx-only cable modem device. The uplink is a modem
* and we do not want to arp on it.
*/
dev->flags = IFF_POINTOPOINT|IFF_NOARP;
SET_NETDEV_DEV(dev, &pdev->dev);
if (sb1000_debug > 0)
printk(KERN_NOTICE "%s", version);
dev->netdev_ops = &sb1000_netdev_ops;
/* hardware address is 0:0:serial_number */
dev->dev_addr[2] = serial_number >> 24 & 0xff;
dev->dev_addr[3] = serial_number >> 16 & 0xff;
dev->dev_addr[4] = serial_number >> 8 & 0xff;
dev->dev_addr[5] = serial_number >> 0 & 0xff;
pnp_set_drvdata(pdev, dev);
error = register_netdev(dev);
if (error)
goto out_free_netdev;
return 0;
out_free_netdev:
free_netdev(dev);
out_release_regions:
release_region(ioaddr[1], 16);
out_release_region0:
release_region(ioaddr[0], 16);
out_disable:
pnp_disable_dev(pdev);
out_detach:
pnp_device_detach(pdev);
return error;
}
static void
sb1000_remove_one(struct pnp_dev *pdev)
{
struct net_device *dev = pnp_get_drvdata(pdev);
unregister_netdev(dev);
release_region(dev->base_addr, 16);
release_region(dev->mem_start, 16);
free_netdev(dev);
}
static struct pnp_driver sb1000_driver = {
.name = "sb1000",
.id_table = sb1000_pnp_ids,
.probe = sb1000_probe_one,
.remove = sb1000_remove_one,
};
/*
* SB1000 hardware routines to be used during open/configuration phases
*/
static const int TimeOutJiffies = (875 * HZ) / 100;
/* Card Wait For Busy Clear (cannot be used during an interrupt) */
static int
card_wait_for_busy_clear(const int ioaddr[], const char* name)
{
unsigned char a;
unsigned long timeout;
a = inb(ioaddr[0] + 7);
timeout = jiffies + TimeOutJiffies;
while (a & 0x80 || a & 0x40) {
/* a little sleep */
yield();
a = inb(ioaddr[0] + 7);
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n",
name);
return -ETIME;
}
}
return 0;
}
/* Card Wait For Ready (cannot be used during an interrupt) */
static int
card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
{
unsigned char a;
unsigned long timeout;
a = inb(ioaddr[1] + 6);
timeout = jiffies + TimeOutJiffies;
while (a & 0x80 || !(a & 0x40)) {
/* a little sleep */
yield();
a = inb(ioaddr[1] + 6);
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: card_wait_for_ready timeout\n",
name);
return -ETIME;
}
}
in[1] = inb(ioaddr[0] + 1);
in[2] = inb(ioaddr[0] + 2);
in[3] = inb(ioaddr[0] + 3);
in[4] = inb(ioaddr[0] + 4);
in[0] = inb(ioaddr[0] + 5);
in[6] = inb(ioaddr[0] + 6);
in[5] = inb(ioaddr[1] + 6);
return 0;
}
/* Card Send Command (cannot be used during an interrupt) */
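/*
 * Commands are 6-byte sequences: out[2]..out[5] go to I/O ports +1..+4,
 * out[1] to +5, 0xa0 is strobed to +6 and the opcode out[0] to +7.  All
 * opcodes except 0x20/0x30 are followed by a 7-byte status read, and for
 * opcodes >= 0x80 (other than the 0x1b get/set bracketing commands) the
 * card must acknowledge with out[1] | 0x80 in the first status byte.
 */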
static int
card_send_command(const int ioaddr[], const char* name,
const unsigned char out[], unsigned char in[])
{
int status, x;
if ((status = card_wait_for_busy_clear(ioaddr, name)))
return status;
outb(0xa0, ioaddr[0] + 6);
outb(out[2], ioaddr[0] + 1);
outb(out[3], ioaddr[0] + 2);
outb(out[4], ioaddr[0] + 3);
outb(out[5], ioaddr[0] + 4);
outb(out[1], ioaddr[0] + 5);
outb(0xa0, ioaddr[0] + 6);
outb(out[0], ioaddr[0] + 7);
if (out[0] != 0x20 && out[0] != 0x30) {
if ((status = card_wait_for_ready(ioaddr, name, in)))
return status;
inb(ioaddr[0] + 7);
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: card_send_command "
"out: %02x%02x%02x%02x%02x%02x "
"in: %02x%02x%02x%02x%02x%02x%02x\n", name,
out[0], out[1], out[2], out[3], out[4], out[5],
in[0], in[1], in[2], in[3], in[4], in[5], in[6]);
} else {
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: card_send_command "
"out: %02x%02x%02x%02x%02x%02x\n", name,
out[0], out[1], out[2], out[3], out[4], out[5]);
}
if (out[1] == 0x1b) {
x = (out[2] == 0x02);
} else {
if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
return -EIO;
}
return 0;
}
/*
* SB1000 hardware routines to be used during frame rx interrupt
*/
static const int Sb1000TimeOutJiffies = 7 * HZ;
/* Card Wait For Ready (to be used during frame rx) */
static int
sb1000_wait_for_ready(const int ioaddr[], const char* name)
{
unsigned long timeout;
timeout = jiffies + Sb1000TimeOutJiffies;
while (inb(ioaddr[1] + 6) & 0x80) {
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
name);
return -ETIME;
}
}
timeout = jiffies + Sb1000TimeOutJiffies;
while (!(inb(ioaddr[1] + 6) & 0x40)) {
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
name);
return -ETIME;
}
}
inb(ioaddr[0] + 7);
return 0;
}
/* Card Wait For Ready Clear (to be used during frame rx) */
static int
sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
{
unsigned long timeout;
timeout = jiffies + Sb1000TimeOutJiffies;
while (inb(ioaddr[1] + 6) & 0x80) {
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
name);
return -ETIME;
}
}
timeout = jiffies + Sb1000TimeOutJiffies;
while (inb(ioaddr[1] + 6) & 0x40) {
if (time_after_eq(jiffies, timeout)) {
printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
name);
return -ETIME;
}
}
return 0;
}
/* Card Send Command (to be used during frame rx) */
static void
sb1000_send_command(const int ioaddr[], const char* name,
const unsigned char out[])
{
outb(out[2], ioaddr[0] + 1);
outb(out[3], ioaddr[0] + 2);
outb(out[4], ioaddr[0] + 3);
outb(out[5], ioaddr[0] + 4);
outb(out[1], ioaddr[0] + 5);
outb(out[0], ioaddr[0] + 7);
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
"%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
}
/* Card Read Status (to be used during frame rx) */
static void
sb1000_read_status(const int ioaddr[], unsigned char in[])
{
in[1] = inb(ioaddr[0] + 1);
in[2] = inb(ioaddr[0] + 2);
in[3] = inb(ioaddr[0] + 3);
in[4] = inb(ioaddr[0] + 4);
in[0] = inb(ioaddr[0] + 5);
}
/* Issue Read Command (to be used during frame rx) */
static void
sb1000_issue_read_command(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
sb1000_wait_for_ready_clear(ioaddr, name);
outb(0xa0, ioaddr[0] + 6);
sb1000_send_command(ioaddr, name, Command0);
}
/*
* SB1000 commands for open/configuration
*/
/* reset SB1000 card */
static int
sb1000_reset(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int port, status;
port = ioaddr[1] + 6;
outb(0x4, port);
inb(port);
udelay(1000);
outb(0x0, port);
inb(port);
ssleep(1);
outb(0x4, port);
inb(port);
udelay(1000);
outb(0x0, port);
inb(port);
udelay(0);
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if (st[3] != 0xf0)
return -EIO;
return 0;
}
/* check SB1000 firmware CRC */
static int
sb1000_check_CRC(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int crc, status;
/* check CRC */
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if (st[1] != st[3] || st[2] != st[4])
return -EIO;
crc = st[1] << 8 | st[2];
return 0;
}
static inline int
sb1000_start_get_set_command(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
return card_send_command(ioaddr, name, Command0, st);
}
static int
sb1000_end_get_set_command(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int status;
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
return card_send_command(ioaddr, name, Command1, st);
}
static int
sb1000_activate(const int ioaddr[], const char* name)
{
static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int status;
ssleep(1);
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if ((status = card_send_command(ioaddr, name, Command1, st)))
return status;
if (st[3] != 0xf1) {
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
return -EIO;
}
udelay(1000);
return sb1000_start_get_set_command(ioaddr, name);
}
/* get SB1000 firmware version */
static int
sb1000_get_firmware_version(const int ioaddr[], const char* name,
unsigned char version[], int do_end)
{
static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int status;
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if (st[0] != 0xa3)
return -EIO;
version[0] = st[1];
version[1] = st[2];
if (do_end)
return sb1000_end_get_set_command(ioaddr, name);
else
return 0;
}
/* get SB1000 frequency */
static int
sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
{
static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int status;
udelay(1000);
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
*frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4];
return sb1000_end_get_set_command(ioaddr, name);
}
/* set SB1000 frequency */
static int
sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
{
unsigned char st[7];
int status;
unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00};
const int FrequencyLowerLimit = 57000;
const int FrequencyUpperLimit = 804000;
if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) {
printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range "
"[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit,
FrequencyUpperLimit);
return -EINVAL;
}
udelay(1000);
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
Command0[5] = frequency & 0xff;
frequency >>= 8;
Command0[4] = frequency & 0xff;
frequency >>= 8;
Command0[3] = frequency & 0xff;
frequency >>= 8;
Command0[2] = frequency & 0xff;
return card_send_command(ioaddr, name, Command0, st);
}
/* get SB1000 PIDs */
static int
sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
{
static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
int status;
udelay(1000);
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
PID[0] = st[1] << 8 | st[2];
if ((status = card_send_command(ioaddr, name, Command1, st)))
return status;
PID[1] = st[1] << 8 | st[2];
if ((status = card_send_command(ioaddr, name, Command2, st)))
return status;
PID[2] = st[1] << 8 | st[2];
if ((status = card_send_command(ioaddr, name, Command3, st)))
return status;
PID[3] = st[1] << 8 | st[2];
return sb1000_end_get_set_command(ioaddr, name);
}
/* set SB1000 PIDs */
static int
sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
{
static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
short p;
int status;
unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00};
unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
udelay(1000);
if ((status = sb1000_start_get_set_command(ioaddr, name)))
return status;
p = PID[0];
Command0[3] = p & 0xff;
p >>= 8;
Command0[2] = p & 0xff;
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
p = PID[1];
Command1[3] = p & 0xff;
p >>= 8;
Command1[2] = p & 0xff;
if ((status = card_send_command(ioaddr, name, Command1, st)))
return status;
p = PID[2];
Command2[3] = p & 0xff;
p >>= 8;
Command2[2] = p & 0xff;
if ((status = card_send_command(ioaddr, name, Command2, st)))
return status;
p = PID[3];
Command3[3] = p & 0xff;
p >>= 8;
Command3[2] = p & 0xff;
if ((status = card_send_command(ioaddr, name, Command3, st)))
return status;
if ((status = card_send_command(ioaddr, name, Command4, st)))
return status;
return sb1000_end_get_set_command(ioaddr, name);
}
static void
sb1000_print_status_buffer(const char* name, unsigned char st[],
unsigned char buffer[], int size)
{
int i, j, k;
printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]);
if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) {
printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d "
"to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29],
buffer[35], buffer[38], buffer[39], buffer[40], buffer[41],
buffer[46] << 8 | buffer[47],
buffer[42], buffer[43], buffer[44], buffer[45],
buffer[48] << 8 | buffer[49]);
} else {
for (i = 0, k = 0; i < (size + 7) / 8; i++) {
printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:");
for (j = 0; j < 8 && k < size; j++, k++)
printk(" %02x", buffer[k]);
printk("\n");
}
}
}
/*
* SB1000 commands for frame rx interrupt
*/
/* receive a single frame and assemble datagram
* (this is the heart of the interrupt routine)
*/
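/*
 * Each frame is FRAMESIZE (184) bytes read with insw().  The first two
 * status bytes carry a session id and frame id that select one of the
 * NPIDS streams; a session byte with bit 0x40 set starts a new datagram,
 * whose header supplies the datagram length, while other frames continue
 * the one in progress.  Once dlen plus the 4-byte trailer has accumulated
 * the skb is trimmed to dlen and passed up with netif_rx().
 */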
static int
sb1000_rx(struct net_device *dev)
{
#define FRAMESIZE 184
unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
short dlen;
int ioaddr, ns;
unsigned int skbsize;
struct sk_buff *skb;
struct sb1000_private *lp = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
/* SB1000 frame constants */
const int FrameSize = FRAMESIZE;
const int NewDatagramHeaderSkip = 8;
const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
const int ContDatagramHeaderSkip = 7;
const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
const int TrailerSize = 4;
ioaddr = dev->base_addr;
insw(ioaddr, (unsigned short*) st, 1);
#ifdef XXXDEBUG
printk("cm0: received: %02x %02x\n", st[0], st[1]);
#endif /* XXXDEBUG */
lp->rx_frames++;
/* decide if it is a good or bad frame */
for (ns = 0; ns < NPIDS; ns++) {
session_id = lp->rx_session_id[ns];
frame_id = lp->rx_frame_id[ns];
if (st[0] == session_id) {
if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
goto good_frame;
} else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
goto skipped_frame;
} else {
goto bad_frame;
}
} else if (st[0] == (session_id | 0x40)) {
if ((st[1] & 0xf0) == 0x30) {
goto skipped_frame;
} else {
goto bad_frame;
}
}
}
goto bad_frame;
skipped_frame:
stats->rx_frame_errors++;
skb = lp->rx_skb[ns];
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
"expecting %02x %02x\n", dev->name, st[0], st[1],
skb ? session_id : session_id | 0x40, frame_id);
if (skb) {
dev_kfree_skb(skb);
skb = NULL;
}
good_frame:
lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
/* new datagram */
if (st[0] & 0x40) {
/* get data length */
insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
#ifdef XXXDEBUG
printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
#endif /* XXXDEBUG */
if (buffer[0] != NewDatagramHeaderSkip) {
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: new datagram header skip error: "
"got %02x expecting %02x\n", dev->name, buffer[0],
NewDatagramHeaderSkip);
stats->rx_length_errors++;
insw(ioaddr, buffer, NewDatagramDataSize / 2);
goto bad_frame_next;
}
dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
buffer[NewDatagramHeaderSkip + 4]) - 17;
if (dlen > SB1000_MRU) {
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: datagram length (%d) greater "
"than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
stats->rx_length_errors++;
insw(ioaddr, buffer, NewDatagramDataSize / 2);
goto bad_frame_next;
}
lp->rx_dlen[ns] = dlen;
/* compute size to allocate for datagram */
skbsize = dlen + FrameSize;
if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: can't allocate %d bytes long "
"skbuff\n", dev->name, skbsize);
stats->rx_dropped++;
insw(ioaddr, buffer, NewDatagramDataSize / 2);
goto dropped_frame;
}
skb->dev = dev;
skb_reset_mac_header(skb);
skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
insw(ioaddr, skb_put(skb, NewDatagramDataSize),
NewDatagramDataSize / 2);
lp->rx_skb[ns] = skb;
} else {
/* continuation of previous datagram */
insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
if (buffer[0] != ContDatagramHeaderSkip) {
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: cont datagram header skip error: "
"got %02x expecting %02x\n", dev->name, buffer[0],
ContDatagramHeaderSkip);
stats->rx_length_errors++;
insw(ioaddr, buffer, ContDatagramDataSize / 2);
goto bad_frame_next;
}
skb = lp->rx_skb[ns];
insw(ioaddr, skb_put(skb, ContDatagramDataSize),
ContDatagramDataSize / 2);
dlen = lp->rx_dlen[ns];
}
if (skb->len < dlen + TrailerSize) {
lp->rx_session_id[ns] &= ~0x40;
return 0;
}
/* datagram completed: send to upper level */
skb_trim(skb, dlen);
netif_rx(skb);
stats->rx_bytes += dlen;
stats->rx_packets++;
lp->rx_skb[ns] = NULL;
lp->rx_session_id[ns] |= 0x40;
return 0;
bad_frame:
insw(ioaddr, buffer, FrameSize / 2);
if (sb1000_debug > 1)
printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
dev->name, st[0], st[1]);
stats->rx_frame_errors++;
bad_frame_next:
if (sb1000_debug > 2)
sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
dropped_frame:
stats->rx_errors++;
if (ns < NPIDS) {
if ((skb = lp->rx_skb[ns])) {
dev_kfree_skb(skb);
lp->rx_skb[ns] = NULL;
}
lp->rx_session_id[ns] |= 0x40;
}
return -1;
}
static void
sb1000_error_dpc(struct net_device *dev)
{
static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
char *name;
unsigned char st[5];
int ioaddr[2];
struct sb1000_private *lp = netdev_priv(dev);
const int ErrorDpcCounterInitialize = 200;
ioaddr[0] = dev->base_addr;
/* mem_start holds the second I/O address */
ioaddr[1] = dev->mem_start;
name = dev->name;
sb1000_wait_for_ready_clear(ioaddr, name);
sb1000_send_command(ioaddr, name, Command0);
sb1000_wait_for_ready(ioaddr, name);
sb1000_read_status(ioaddr, st);
if (st[1] & 0x10)
lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
}
/*
* Linux interface functions
*/
static int
sb1000_open(struct net_device *dev)
{
char *name;
int ioaddr[2], status;
unsigned char version[2];
struct sb1000_private *lp = netdev_priv(dev);
const unsigned short FirmwareVersion[] = {0x01, 0x01};
ioaddr[0] = dev->base_addr;
/* mem_start holds the second I/O address */
ioaddr[1] = dev->mem_start;
name = dev->name;
/* initialize sb1000 */
if ((status = sb1000_reset(ioaddr, name)))
return status;
ssleep(1);
if ((status = sb1000_check_CRC(ioaddr, name)))
return status;
/* initialize private data before board can catch interrupts */
lp->rx_skb[0] = NULL;
lp->rx_skb[1] = NULL;
lp->rx_skb[2] = NULL;
lp->rx_skb[3] = NULL;
lp->rx_dlen[0] = 0;
lp->rx_dlen[1] = 0;
lp->rx_dlen[2] = 0;
lp->rx_dlen[3] = 0;
lp->rx_frames = 0;
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
lp->rx_session_id[1] = 0x48;
lp->rx_session_id[2] = 0x44;
lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
lp->rx_frame_id[3] = 0;
if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
return -EAGAIN;
}
if (sb1000_debug > 2)
printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);
/* Activate board and check firmware version */
udelay(1000);
if ((status = sb1000_activate(ioaddr, name)))
return status;
udelay(0);
if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
return status;
if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
printk(KERN_WARNING "%s: found firmware version %x.%02x "
"(should be %x.%02x)\n", name, version[0], version[1],
FirmwareVersion[0], FirmwareVersion[1]);
netif_start_queue(dev);
return 0; /* Always succeed */
}
static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
char* name;
unsigned char version[2];
short PID[4];
int ioaddr[2], status, frequency;
unsigned int stats[5];
struct sb1000_private *lp = netdev_priv(dev);
if (!(dev && dev->flags & IFF_UP))
return -ENODEV;
ioaddr[0] = dev->base_addr;
/* mem_start holds the second I/O address */
ioaddr[1] = dev->mem_start;
name = dev->name;
switch (cmd) {
case SIOCGCMSTATS: /* get statistics */
stats[0] = dev->stats.rx_bytes;
stats[1] = lp->rx_frames;
stats[2] = dev->stats.rx_packets;
stats[3] = dev->stats.rx_errors;
stats[4] = dev->stats.rx_dropped;
if(copy_to_user(ifr->ifr_data, stats, sizeof(stats)))
return -EFAULT;
status = 0;
break;
case SIOCGCMFIRMWARE: /* get firmware version */
if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
return status;
if(copy_to_user(ifr->ifr_data, version, sizeof(version)))
return -EFAULT;
break;
case SIOCGCMFREQUENCY: /* get frequency */
if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
return status;
if(put_user(frequency, (int __user *) ifr->ifr_data))
return -EFAULT;
break;
case SIOCSCMFREQUENCY: /* set frequency */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if(get_user(frequency, (int __user *) ifr->ifr_data))
return -EFAULT;
if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
return status;
break;
case SIOCGCMPIDS: /* get PIDs */
if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
return status;
if(copy_to_user(ifr->ifr_data, PID, sizeof(PID)))
return -EFAULT;
break;
case SIOCSCMPIDS: /* set PIDs */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if(copy_from_user(PID, ifr->ifr_data, sizeof(PID)))
return -EFAULT;
if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
return status;
/* set session_id, frame_id and pkt_type too */
lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f);
lp->rx_session_id[1] = 0x48;
lp->rx_session_id[2] = 0x44;
lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
lp->rx_frame_id[3] = 0;
break;
default:
status = -EINVAL;
break;
}
return status;
}
/* transmit function: do nothing since SB1000 can't send anything out */
static netdev_tx_t
sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
/* sb1000 can't xmit datagrams */
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* SB1000 interrupt handler. */
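/*
 * Only services the interrupt when status bits 0x08 and 0x20 are both set
 * at the second I/O base.  After each frame a fresh read command is
 * issued; bit 0 of the status byte read back from the card triggers the
 * error DPC (command 0x26), command 0x2c is sent when the error-DPC
 * countdown expires, and command 0x2e once MaxRxErrorCount receive errors
 * accumulate.
 */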
static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
{
static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
char *name;
unsigned char st;
int ioaddr[2];
struct net_device *dev = dev_id;
struct sb1000_private *lp = netdev_priv(dev);
const int MaxRxErrorCount = 6;
ioaddr[0] = dev->base_addr;
/* mem_start holds the second I/O address */
ioaddr[1] = dev->mem_start;
name = dev->name;
/* is it a good interrupt? */
st = inb(ioaddr[1] + 6);
if (!(st & 0x08 && st & 0x20)) {
return IRQ_NONE;
}
if (sb1000_debug > 3)
printk(KERN_DEBUG "%s: entering interrupt\n", dev->name);
st = inb(ioaddr[0] + 7);
if (sb1000_rx(dev))
lp->rx_error_count++;
#ifdef SB1000_DELAY
udelay(SB1000_DELAY);
#endif /* SB1000_DELAY */
sb1000_issue_read_command(ioaddr, name);
if (st & 0x01) {
sb1000_error_dpc(dev);
sb1000_issue_read_command(ioaddr, name);
}
if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) {
sb1000_wait_for_ready_clear(ioaddr, name);
sb1000_send_command(ioaddr, name, Command0);
sb1000_wait_for_ready(ioaddr, name);
sb1000_issue_read_command(ioaddr, name);
}
if (lp->rx_error_count >= MaxRxErrorCount) {
sb1000_wait_for_ready_clear(ioaddr, name);
sb1000_send_command(ioaddr, name, Command1);
sb1000_wait_for_ready(ioaddr, name);
sb1000_issue_read_command(ioaddr, name);
lp->rx_error_count = 0;
}
return IRQ_HANDLED;
}
static int sb1000_close(struct net_device *dev)
{
int i;
int ioaddr[2];
struct sb1000_private *lp = netdev_priv(dev);
if (sb1000_debug > 2)
printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name);
netif_stop_queue(dev);
ioaddr[0] = dev->base_addr;
/* mem_start holds the second I/O address */
ioaddr[1] = dev->mem_start;
free_irq(dev->irq, dev);
/* If we don't do this, we can't re-insmod it later. */
release_region(ioaddr[1], SB1000_IO_EXTENT);
release_region(ioaddr[0], SB1000_IO_EXTENT);
/* free rx_skb's if needed */
for (i=0; i<4; i++) {
if (lp->rx_skb[i]) {
dev_kfree_skb(lp->rx_skb[i]);
}
}
return 0;
}
MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
MODULE_DESCRIPTION("General Instruments SB1000 driver");
MODULE_LICENSE("GPL");
static int __init
sb1000_init(void)
{
return pnp_register_driver(&sb1000_driver);
}
static void __exit
sb1000_exit(void)
{
pnp_unregister_driver(&sb1000_driver);
}
module_init(sb1000_init);
module_exit(sb1000_exit);
| gpl-2.0 |
sub77/kernel_samsung_matisse | arch/arm/mach-msm/board-sprd6500-spi.c | 254 | 3252 | /* arch/arm/mach-capri/board-baffin-spi.c
*
* Copyright (C) 2011 Samsung Electronics Co, Ltd.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
//#include <mach/msm8930-gpio.h>
#include <linux/platform_device.h>
#include <mach/gpiomux.h>
/* These gpios are defined in msm8974pro-mif_kctc-r01.dtsi */
/*
#define GPIO_IPC_MRDY 105
#define GPIO_IPC_SUB_MRDY 106
#define GPIO_IPC_SRDY 117
#define GPIO_IPC_SUB_SRDY 104
#define GPIO_CP_DUMP_INT 119
*/
struct ipc_spi_platform_data {
const char *name;
unsigned gpio_ipc_mrdy;
unsigned gpio_ipc_srdy;
unsigned gpio_ipc_sub_mrdy;
unsigned gpio_ipc_sub_srdy;
unsigned gpio_cp_dump_int;
void (*cfg_gpio)(void);
};
static struct ipc_spi_platform_data spi_modem_data;
static struct platform_device spi_modem = {
.name = "if_spi_platform_driver",
.id = -1,
.dev = {
.platform_data = &spi_modem_data,
},
};
#if 0
void spi_modem_cfg_gpio(void)
{
int ret;
ret=gpio_request(GPIO_IPC_MRDY, "GPIO_IPC_MRDY");
if (ret) {
pr_err("gspi_modem_cfg: gpio_request "
"failed for %d\n",GPIO_IPC_MRDY);
;
}
gpio_tlmm_config(GPIO_CFG(GPIO_IPC_MRDY, GPIOMUX_FUNC_GPIO,
GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
GPIO_CFG_ENABLE);
gpio_set_value(GPIO_IPC_MRDY, 0);
ret= gpio_request(GPIO_IPC_SUB_MRDY, "GPIO_IPC_SUB_MRDY");
if (ret) {
pr_err("spi_modem_cfg: gpio_request "
"failed for %d\n",GPIO_IPC_SUB_MRDY);
;
}
gpio_tlmm_config(GPIO_CFG(GPIO_IPC_SUB_MRDY, GPIOMUX_FUNC_GPIO,
GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
GPIO_CFG_ENABLE);
gpio_set_value(GPIO_IPC_SUB_MRDY, 0);
ret=gpio_request(GPIO_IPC_SRDY, "GPIO_IPC_SRDY");
if (ret) {
pr_err("spi_modem_cfg_: gpio_request "
"failed for %d\n",GPIO_IPC_SRDY);
;
}
gpio_tlmm_config(GPIO_CFG(GPIO_IPC_SRDY, GPIOMUX_FUNC_GPIO,
GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_16MA),
GPIO_CFG_ENABLE);
ret=gpio_request(GPIO_IPC_SUB_SRDY, "GPIO_IPC_SUB_SRDY");
if (ret) {
pr_err("spi_modem_cfg: gpio_request "
"failed for %d\n",GPIO_IPC_SUB_SRDY);
;
}
gpio_tlmm_config(GPIO_CFG(GPIO_IPC_SUB_SRDY, GPIOMUX_FUNC_GPIO,
GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_16MA),
GPIO_CFG_ENABLE);
spi_modem_data.gpio_ipc_mrdy = GPIO_IPC_MRDY;
spi_modem_data.gpio_ipc_srdy = GPIO_IPC_SRDY;
spi_modem_data.gpio_ipc_sub_mrdy = GPIO_IPC_SUB_MRDY;
spi_modem_data.gpio_ipc_sub_srdy = GPIO_IPC_SUB_SRDY;
spi_modem_data.gpio_cp_dump_int = GPIO_CP_DUMP_INT;
pr_info("[SPI] %s done\n", __func__);
}
#endif
static int __init init_spi(void)
{
pr_info("[SPI] %s\n", __func__);
//spi_modem_cfg_gpio();
platform_device_register(&spi_modem);
/*
spi_register_board_info(modem_if_spi_device,
ARRAY_SIZE(modem_if_spi_device));
*/
return 0;
}
module_init(init_spi);
| gpl-2.0 |
qnhoang81/Intercept_Kernel | drivers/staging/otus/wrap_pkt.c | 510 | 4797 | /*
* Copyright (c) 2007-2008 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* */
/* Module Name : wrap_pkt.c */
/* */
/* Abstract */
/* This module contains wrapper functions for packet handling */
/* */
/* NOTES */
/* Platform dependent. */
/* */
/************************************************************************/
#include "oal_dt.h"
#include "usbdrv.h"
#include <linux/netlink.h>
#include <net/iw_handler.h>
//extern struct zsWdsStruct wds[ZM_WDS_PORT_NUMBER];
extern struct zsVapStruct vap[ZM_VAP_PORT_NUMBER];
/***** Rx *****/
void zfLnxRecv80211(zdev_t* dev, zbuf_t* buf, struct zsAdditionInfo* addInfo)
{
u16_t frameType;
u16_t frameCtrl;
u16_t frameSubtype;
zbuf_t *skb1;
struct usbdrv_private *macp = dev->ml_priv;
//frameCtrl = zmw_buf_readb(dev, buf, 0);
frameCtrl = *(u8_t*)((u8_t*)buf->data);
frameType = frameCtrl & 0xf;
frameSubtype = frameCtrl & 0xf0;
if ((frameType == 0x0) && (macp->forwardMgmt))
{
switch (frameSubtype)
{
/* Beacon */
case 0x80 :
/* Probe response */
case 0x50 :
skb1 = skb_copy(buf, GFP_ATOMIC);
if(skb1 != NULL)
{
skb1->dev = dev;
skb1->mac_header = skb1->data;
skb1->ip_summed = CHECKSUM_NONE;
skb1->pkt_type = PACKET_OTHERHOST;
skb1->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
netif_rx(skb1);
}
break;
default:
break;
}
}
zfiRecv80211(dev, buf, addInfo);
return;
}
#define ZM_AVOID_UDP_LARGE_PACKET_FAIL
void zfLnxRecvEth(zdev_t* dev, zbuf_t* buf, u16_t port)
{
struct usbdrv_private *macp = dev->ml_priv;
/* Record the received length now; the skb is freed or handed off to
   netif_rx() below and must not be dereferenced afterwards. */
unsigned int rx_len = buf->len;
#ifdef ZM_AVOID_UDP_LARGE_PACKET_FAIL
zbuf_t *new_buf;
//new_buf = dev_alloc_skb(2048);
new_buf = dev_alloc_skb(buf->len);
if (new_buf == NULL) {
	/* Allocation failed: drop the frame */
	dev_kfree_skb_any(buf);
	return;
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
new_buf->tail = 0;
new_buf->len = 0;
#else
new_buf->tail = new_buf->data;
new_buf->len = 0;
#endif
skb_put(new_buf, buf->len);
memcpy(new_buf->data, buf->data, buf->len);
/* Free buffer */
dev_kfree_skb_any(buf);
if (port == 0)
{
new_buf->dev = dev;
new_buf->protocol = eth_type_trans(new_buf, dev);
}
else
{
/* VAP */
if (vap[0].dev != NULL)
{
new_buf->dev = vap[0].dev;
new_buf->protocol = eth_type_trans(new_buf, vap[0].dev);
}
else
{
new_buf->dev = dev;
new_buf->protocol = eth_type_trans(new_buf, dev);
}
}
new_buf->ip_summed = CHECKSUM_NONE;
dev->last_rx = jiffies;
switch(netif_rx(new_buf))
#else
if (port == 0)
{
buf->dev = dev;
buf->protocol = eth_type_trans(buf, dev);
}
else
{
/* VAP */
if (vap[0].dev != NULL)
{
buf->dev = vap[0].dev;
buf->protocol = eth_type_trans(buf, vap[0].dev);
}
else
{
buf->dev = dev;
buf->protocol = eth_type_trans(buf, dev);
}
}
buf->ip_summed = CHECKSUM_NONE;
dev->last_rx = jiffies;
switch(netif_rx(buf))
#endif
{
case NET_RX_DROP:
break;
default:
macp->drv_stats.net_stats.rx_packets++;
macp->drv_stats.net_stats.rx_bytes += rx_len;
break;
}
return;
}
/* Leave an empty line below to remove warning message on some compiler */
| gpl-2.0 |
omnirom/android_kernel_asus_tegra3 | arch/arm/kernel/kprobes-arm.c | 510 | 37104 | /*
* arch/arm/kernel/kprobes-decode.c
*
* Copyright (C) 2006, 2007 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
 * We do not have hardware single-stepping on ARM. This
* effort is further complicated by the ARM not having a
* "next PC" register. Instructions that change the PC
* can't be safely single-stepped in a MP environment, so
* we have a lot of work to do:
*
* In the prepare phase:
* *) If it is an instruction that does anything
* with the CPU mode, we reject it for a kprobe.
* (This is out of laziness rather than need. The
* instructions could be simulated.)
*
* *) Otherwise, decode the instruction rewriting its
* registers to take fixed, ordered registers and
* setting a handler for it to run the instruction.
*
* In the execution phase by an instruction's handler:
*
* *) If the PC is written to by the instruction, the
* instruction must be fully simulated in software.
*
* *) Otherwise, a modified form of the instruction is
* directly executed. Its handler calls the
* instruction in insn[0]. In insn[1] is a
* "mov pc, lr" to return.
*
* Before calling, load up the reordered registers
* from the original instruction's registers. If one
* of the original input registers is the PC, compute
* and adjust the appropriate input register.
*
* After call completes, copy the output registers to
* the original instruction's original registers.
*
* We don't use a real breakpoint instruction since that
* would have us in the kernel go from SVC mode to SVC
* mode losing the link register. Instead we use an
* undefined instruction. To simplify processing, the
* undefined instruction used for kprobes must be reserved
* exclusively for kprobes use.
*
* TODO: ifdef out some instruction decoding based on architecture.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "kprobes.h"
#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
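/*
 * Illustrative worked example (not part of the original source): for the
 * unconditional branch-to-self instruction 0xEAFFFFFE ("b ."), the imm24
 * field is 0xFFFFFE; shifted left by 2 it becomes 0x03FFFFF8, and since
 * bit 25 is set, sign_extend() ORs in 0xFE000000, giving 0xFFFFFFF8 (-8).
 * simulate_bbl() below then computes pc = iaddr + 8 + (-8) = iaddr, i.e.
 * the branch correctly targets its own address.
 */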
#if __LINUX_ARM_ARCH__ >= 6
#define BLX(reg) "blx "reg" \n\t"
#else
#define BLX(reg) "mov lr, pc \n\t" \
"mov pc, "reg" \n\t"
#endif
/*
 * To avoid the complications of mimicking single-stepping on a
* processor without a Next-PC or a single-step mode, and to
* avoid having to deal with the side-effects of boosting, we
* simulate or emulate (almost) all ARM instructions.
*
* "Simulation" is where the instruction's behavior is duplicated in
* C code. "Emulation" is where the original instruction is rewritten
* and executed, often by altering its registers.
*
* By having all behavior of the kprobe'd instruction completed before
* returning from the kprobe_handler(), all locks (scheduler and
* interrupt) can safely be released. There is no need for secondary
* breakpoints, no race with MP or preemptable kernels, nor having to
* clean up resources counts at a later time impacting overall system
* performance. By rewriting the instruction, only the minimum registers
* need to be loaded and saved back optimizing performance.
*
* Calling the insnslot_*_rwflags version of a function doesn't hurt
* anything even when the CPSR flags aren't updated by the
* instruction. It's just a little slower in return for saving
* a little space by not having a duplicate function that doesn't
* update the flags. (The same optimization can be said for
* instructions that do or don't perform register writeback)
* Also, instructions can either read the flags, only write the
* flags, or read and write the flags. To save combinations
* rather than for sheer performance, flag functions just assume
* read and write of flags.
*/
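/*
 * A sketch of what the rewriting described above produces (illustrative
 * only; the exact slot layout is determined by the decode tables further
 * down): to emulate "add r5, r6, r7" (rd=5, rn=6, rm=7), the copied
 * instruction slot is rewritten to use the fixed registers loaded by
 * emulate_rd12rn16rm0rs8_rwflags():
 *
 *	insn[0]: add r0, r2, r3		@ rd -> r0, rn -> r2, rm -> r3
 *	insn[1]: mov pc, lr		@ return to the emulation handler
 *
 * The handler loads r0/r2/r3 from regs->uregs[5], [6] and [7], calls the
 * slot through BLX(), then writes r0 back to regs->uregs[5] and merges
 * any flag changes into regs->ARM_cpsr.
 */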
static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
long iaddr = (long)p->addr;
int disp = branch_displacement(insn);
if (insn & (1 << 24))
regs->ARM_lr = iaddr + 4;
regs->ARM_pc = iaddr + 8 + disp;
}
static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
long iaddr = (long)p->addr;
int disp = branch_displacement(insn);
regs->ARM_lr = iaddr + 4;
regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
regs->ARM_cpsr |= PSR_T_BIT;
}
static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rm = insn & 0xf;
long rmv = regs->uregs[rm];
if (insn & (1 << 5))
regs->ARM_lr = (long)p->addr + 4;
regs->ARM_pc = rmv & ~0x1;
regs->ARM_cpsr &= ~PSR_T_BIT;
if (rmv & 0x1)
regs->ARM_cpsr |= PSR_T_BIT;
}
static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rd = (insn >> 12) & 0xf;
unsigned long mask = 0xf8ff03df; /* Mask out execution state */
regs->uregs[rd] = regs->ARM_cpsr & mask;
}
static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
{
regs->uregs[12] = regs->uregs[13];
}
static void __kprobes
emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
unsigned long pc = (unsigned long)p->addr + 8;
int rt = (insn >> 12) & 0xf;
int rn = (insn >> 16) & 0xf;
int rm = insn & 0xf;
register unsigned long rtv asm("r0") = regs->uregs[rt];
register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
register unsigned long rnv asm("r2") = (rn == 15) ? pc
: regs->uregs[rn];
register unsigned long rmv asm("r3") = regs->uregs[rm];
__asm__ __volatile__ (
BLX("%[fn]")
: "=r" (rtv), "=r" (rt2v), "=r" (rnv)
: "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
[fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
regs->uregs[rt] = rtv;
regs->uregs[rt+1] = rt2v;
if (is_writeback(insn))
regs->uregs[rn] = rnv;
}
static void __kprobes
emulate_ldr(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
unsigned long pc = (unsigned long)p->addr + 8;
int rt = (insn >> 12) & 0xf;
int rn = (insn >> 16) & 0xf;
int rm = insn & 0xf;
register unsigned long rtv asm("r0");
register unsigned long rnv asm("r2") = (rn == 15) ? pc
: regs->uregs[rn];
register unsigned long rmv asm("r3") = regs->uregs[rm];
__asm__ __volatile__ (
BLX("%[fn]")
: "=r" (rtv), "=r" (rnv)
: "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
if (rt == 15)
load_write_pc(rtv, regs);
else
regs->uregs[rt] = rtv;
if (is_writeback(insn))
regs->uregs[rn] = rnv;
}
static void __kprobes
emulate_str(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
unsigned long rtpc = (unsigned long)p->addr + str_pc_offset;
unsigned long rnpc = (unsigned long)p->addr + 8;
int rt = (insn >> 12) & 0xf;
int rn = (insn >> 16) & 0xf;
int rm = insn & 0xf;
register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
: regs->uregs[rt];
register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
: regs->uregs[rn];
register unsigned long rmv asm("r3") = regs->uregs[rm];
__asm__ __volatile__ (
BLX("%[fn]")
: "=r" (rnv)
: "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
if (is_writeback(insn))
regs->uregs[rn] = rnv;
}
static void __kprobes
emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
unsigned long pc = (unsigned long)p->addr + 8;
int rd = (insn >> 12) & 0xf;
int rn = (insn >> 16) & 0xf;
int rm = insn & 0xf;
int rs = (insn >> 8) & 0xf;
register unsigned long rdv asm("r0") = regs->uregs[rd];
register unsigned long rnv asm("r2") = (rn == 15) ? pc
: regs->uregs[rn];
register unsigned long rmv asm("r3") = (rm == 15) ? pc
: regs->uregs[rm];
register unsigned long rsv asm("r1") = regs->uregs[rs];
unsigned long cpsr = regs->ARM_cpsr;
__asm__ __volatile__ (
"msr cpsr_fs, %[cpsr] \n\t"
BLX("%[fn]")
"mrs %[cpsr], cpsr \n\t"
: "=r" (rdv), [cpsr] "=r" (cpsr)
: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
"1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
if (rd == 15)
alu_write_pc(rdv, regs);
else
regs->uregs[rd] = rdv;
regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
static void __kprobes
emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rd = (insn >> 12) & 0xf;
int rn = (insn >> 16) & 0xf;
int rm = insn & 0xf;
register unsigned long rdv asm("r0") = regs->uregs[rd];
register unsigned long rnv asm("r2") = regs->uregs[rn];
register unsigned long rmv asm("r3") = regs->uregs[rm];
unsigned long cpsr = regs->ARM_cpsr;
__asm__ __volatile__ (
"msr cpsr_fs, %[cpsr] \n\t"
BLX("%[fn]")
"mrs %[cpsr], cpsr \n\t"
: "=r" (rdv), [cpsr] "=r" (cpsr)
: "0" (rdv), "r" (rnv), "r" (rmv),
"1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
regs->uregs[rd] = rdv;
regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
static void __kprobes
emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rd = (insn >> 16) & 0xf;
int rn = (insn >> 12) & 0xf;
int rm = insn & 0xf;
int rs = (insn >> 8) & 0xf;
register unsigned long rdv asm("r2") = regs->uregs[rd];
register unsigned long rnv asm("r0") = regs->uregs[rn];
register unsigned long rmv asm("r3") = regs->uregs[rm];
register unsigned long rsv asm("r1") = regs->uregs[rs];
unsigned long cpsr = regs->ARM_cpsr;
__asm__ __volatile__ (
"msr cpsr_fs, %[cpsr] \n\t"
BLX("%[fn]")
"mrs %[cpsr], cpsr \n\t"
: "=r" (rdv), [cpsr] "=r" (cpsr)
: "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
"1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
regs->uregs[rd] = rdv;
regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
static void __kprobes
emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rd = (insn >> 12) & 0xf;
int rm = insn & 0xf;
register unsigned long rdv asm("r0") = regs->uregs[rd];
register unsigned long rmv asm("r3") = regs->uregs[rm];
__asm__ __volatile__ (
BLX("%[fn]")
: "=r" (rdv)
: "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
regs->uregs[rd] = rdv;
}
static void __kprobes
emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
{
kprobe_opcode_t insn = p->opcode;
int rdlo = (insn >> 12) & 0xf;
int rdhi = (insn >> 16) & 0xf;
int rn = insn & 0xf;
int rm = (insn >> 8) & 0xf;
register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
register unsigned long rnv asm("r3") = regs->uregs[rn];
register unsigned long rmv asm("r1") = regs->uregs[rm];
unsigned long cpsr = regs->ARM_cpsr;
__asm__ __volatile__ (
"msr cpsr_fs, %[cpsr] \n\t"
BLX("%[fn]")
"mrs %[cpsr], cpsr \n\t"
: "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
"2" (cpsr), [fn] "r" (p->ainsn.insn_fn)
: "lr", "memory", "cc"
);
regs->uregs[rdlo] = rdlov;
regs->uregs[rdhi] = rdhiv;
regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
/*
 * For the instruction masking and comparisons in all the decode tables
 * below, do _not_ rearrange the order of entries unless you're very,
 * very sure of what you are doing. For the sake of efficiency, the
 * masks for some entries sometimes assume other tests have been done
 * prior to them, so the number of patterns to match for an instruction
 * can be as broad as possible to reduce the number of tests needed.
*/
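/*
 * Concrete example of why entry order matters (illustrative only): the
 * instruction 0xE1A0C00D ("mov ip, sp") matches both the exact pattern
 * (0xffffffff, 0xe1a0c00d) handled by simulate_mov_ipsp and the generic
 * MOV (register) pattern (0x0fa00010, 0x01a00000) handled by
 * emulate_rd12rn16rm0rs8_rwflags, since 0xE1A0C00D & 0x0fa00010 ==
 * 0x01a00000. Only because the specific entry appears first in
 * arm_cccc_000x_table (below) does the cheaper simulation take
 * precedence.
 */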
static const union decode_item arm_1111_table[] = {
/* Unconditional instructions */
/* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
/* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
/* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
/* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop),
/* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
/* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
/* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
/* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop),
/* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1),
/* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
/* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
/* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
/* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
/* Coprocessor instructions... */
/* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
/* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
/* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
/* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
/* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
/* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
/* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
/* Other unallocated instructions... */
DECODE_END
};
static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
/* Miscellaneous instructions */
/* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs,
REGS(0, NOPC, 0, 0, 0)),
/* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx),
/* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx,
REGS(0, 0, 0, 0, NOPC)),
/* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, NOPC)),
/* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
/* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
/* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
/* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
/* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
/* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
/* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
/* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
/* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
/* And unallocated instructions... */
DECODE_END
};
static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
/* Halfword multiply and multiply-accumulate */
/* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
/* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
DECODE_OR (0x0ff000b0, 0x012000a0),
/* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, 0, NOPC, 0, NOPC)),
/* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
DECODE_OR (0x0ff00090, 0x01000080),
/* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
DECODE_END
};
static const union decode_item arm_cccc_0000_____1001_table[] = {
/* Multiply and multiply-accumulate */
/* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
/* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, 0, NOPC, 0, NOPC)),
/* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
/* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
DECODE_OR (0x0fe000f0, 0x00200090),
/* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
/* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
DECODE_OR (0x0ff000f0, 0x00400090),
/* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
/* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
/* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
/* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
/* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
/* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
/* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
/* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
DECODE_END
};
static const union decode_item arm_cccc_0001_____1001_table[] = {
/* Synchronization primitives */
/* SWP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
/* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
/* And unallocated instructions... */
DECODE_END
};
static const union decode_item arm_cccc_000x_____1xx1_table[] = {
/* Extra load/store instructions */
/* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
/* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
/* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
/* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
/* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
DECODE_REJECT (0x0f200090, 0x00200090),
/* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
DECODE_REJECT (0x0e10e0d0, 0x0000e0d0),
/* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
/* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd,
REGS(NOPCWB, NOPCX, 0, 0, NOPC)),
/* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
/* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd,
REGS(NOPCWB, NOPCX, 0, 0, 0)),
/* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str,
REGS(NOPCWB, NOPC, 0, 0, NOPC)),
/* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
/* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
/* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr,
REGS(NOPCWB, NOPC, 0, 0, NOPC)),
/* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str,
REGS(NOPCWB, NOPC, 0, 0, 0)),
/* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
/* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
/* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr,
REGS(NOPCWB, NOPC, 0, 0, 0)),
DECODE_END
};
static const union decode_item arm_cccc_000x_table[] = {
/* Data-processing (register) */
/* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
DECODE_REJECT (0x0e10f000, 0x0010f000),
/* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */
DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp),
/* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
/* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
/* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
/* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, 0, 0, 0, ANY)),
/* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
/* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags,
REGS(0, ANY, 0, 0, ANY)),
/* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
/* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
/* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
/* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
/* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
/* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
/* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
/* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
/* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
/* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, ANY, 0, 0, ANY)),
/* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
/* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
/* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
/* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, 0, NOPC, 0, ANY)),
/* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
/* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags,
REGS(0, ANY, NOPC, 0, ANY)),
/* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
/* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
/* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
/* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
/* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
/* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
/* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
/* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
/* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
/* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, ANY, NOPC, 0, ANY)),
DECODE_END
};
static const union decode_item arm_cccc_001x_table[] = {
/* Data-processing (immediate) */
/* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
/* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, 0)),
/* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
DECODE_OR (0x0fff00ff, 0x03200001),
/* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
DECODE_EMULATE (0x0fff00ff, 0x03200004, kprobe_emulate_none),
/* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
/* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
/* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
DECODE_SIMULATE (0x0fff00fc, 0x03200000, kprobe_simulate_nop),
/* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
/* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
/* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0x0fb00000, 0x03200000),
/* <op>S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
DECODE_REJECT (0x0e10f000, 0x0210f000),
/* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
/* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
/* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
/* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, 0, 0, 0, 0)),
/* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
/* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags,
REGS(0, ANY, 0, 0, 0)),
/* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
/* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
/* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
/* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
/* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
/* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
/* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
/* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
/* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
/* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags,
REGS(ANY, ANY, 0, 0, 0)),
DECODE_END
};
static const union decode_item arm_cccc_0110_____xxx1_table[] = {
/* Media instructions */
/* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
DECODE_EMULATEX (0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
/* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
/* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
DECODE_OR(0x0fa00030, 0x06a00010),
/* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
/* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
DECODE_EMULATEX (0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
REGS(0, NOPC, 0, 0, NOPC)),
/* REV cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
/* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
/* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
/* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
DECODE_EMULATEX (0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, NOPC)),
/* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
DECODE_REJECT (0x0fb00010, 0x06000010),
/* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
DECODE_REJECT (0x0f8000f0, 0x060000b0),
/* ??? cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
DECODE_REJECT (0x0f8000f0, 0x060000d0),
/* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
/* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
/* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
/* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
/* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
/* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
/* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
/* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
/* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
/* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
/* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
/* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
/* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
/* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
/* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
/* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
/* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
/* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
/* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
/* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
/* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
/* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
/* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
/* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
/* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
/* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
/* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
/* UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */
/* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */
/* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */
/* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */
/* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */
/* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */
/* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */
/* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */
/* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */
DECODE_EMULATEX (0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
/* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */
/* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */
DECODE_EMULATEX (0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPC, NOPC, 0, 0, NOPC)),
/* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */
/* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */
DECODE_REJECT (0x0fb000f0, 0x06900070),
/* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */
/* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */
/* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */
/* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */
/* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */
/* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */
DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, NOPC)),
/* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */
/* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */
/* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */
/* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */
/* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */
/* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */
DECODE_EMULATEX (0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc,
REGS(NOPCX, NOPC, 0, 0, NOPC)),
DECODE_END
};
static const union decode_item arm_cccc_0111_____xxx1_table[] = {
/* Media instructions */
/* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
DECODE_REJECT (0x0ff000f0, 0x07f000f0),
/* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
/* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
DECODE_EMULATEX (0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
/* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */
/* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */
DECODE_OR (0x0ff0f090, 0x0700f010),
/* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */
DECODE_OR (0x0ff0f0d0, 0x0750f010),
/* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, 0, NOPC, 0, NOPC)),
/* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */
/* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */
DECODE_OR (0x0ff00090, 0x07000010),
/* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */
DECODE_OR (0x0ff000d0, 0x07500010),
/* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
DECODE_EMULATEX (0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, NOPCX, NOPC, 0, NOPC)),
/* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */
DECODE_EMULATEX (0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc,
REGS(NOPC, NOPC, NOPC, 0, NOPC)),
/* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */
/* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */
DECODE_EMULATEX (0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, NOPC)),
/* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */
DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, 0)),
/* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */
DECODE_EMULATEX (0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc,
REGS(0, NOPC, 0, 0, NOPCX)),
DECODE_END
};
static const union decode_item arm_cccc_01xx_table[] = {
/* Load/store word and unsigned byte */
/* LDRB/STRB pc,[...] cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0x0c40f000, 0x0440f000),
/* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
/* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
/* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
/* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0x0d200000, 0x04200000),
/* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */
/* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0e100000, 0x04000000, emulate_str,
REGS(NOPCWB, ANY, 0, 0, 0)),
/* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */
/* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0e100000, 0x04100000, emulate_ldr,
REGS(NOPCWB, ANY, 0, 0, 0)),
/* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */
/* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0e100000, 0x06000000, emulate_str,
REGS(NOPCWB, ANY, 0, 0, NOPC)),
/* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */
/* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */
DECODE_EMULATEX (0x0e100000, 0x06100000, emulate_ldr,
REGS(NOPCWB, ANY, 0, 0, NOPC)),
DECODE_END
};
static const union decode_item arm_cccc_100x_table[] = {
/* Block data transfer instructions */
/* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
/* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
DECODE_CUSTOM (0x0e400000, 0x08000000, kprobe_decode_ldmstm),
/* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
/* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */
/* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
DECODE_END
};
const union decode_item kprobe_decode_arm_table[] = {
/*
* Unconditional instructions
* 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table),
/*
* Miscellaneous instructions
* cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx
*/
DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table),
/*
* Halfword multiply and multiply-accumulate
* cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx
*/
DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table),
/*
* Multiply and multiply-accumulate
* cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx
*/
DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table),
/*
* Synchronization primitives
* cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx
*/
DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table),
/*
* Extra load/store instructions
* cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx
*/
DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table),
/*
* Data-processing (register)
* cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx
* Data-processing (register-shifted register)
* cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx
*/
DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table),
/*
* Data-processing (immediate)
* cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table),
/*
* Media instructions
* cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx
*/
DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table),
DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table),
/*
* Load/store word and unsigned byte
* cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table),
/*
* Block data transfer instructions
* cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx
*/
DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table),
/* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
/* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl),
/*
* Supervisor Call, and coprocessor instructions
*/
/* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */
/* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */
/* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
/* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
/* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
/* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
/* MRC cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
/* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
DECODE_REJECT (0x0c000000, 0x0c000000),
DECODE_END
};
static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
{
regs->ARM_pc += 4;
p->ainsn.insn_handler(p, regs);
}
/* Return:
* INSN_REJECTED If instruction is one not allowed to kprobe,
* INSN_GOOD If instruction is supported and uses instruction slot,
* INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
*
* For instructions we don't want to kprobe (INSN_REJECTED return result):
 * These are generally ones that modify the processor state, making
 * them "hard" to simulate, such as those that switch processor modes
 * or make accesses in alternate modes. Any of these could be simulated
 * if the work were put into it, but the return would be low considering
 * they should also be very rare.
*/
enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
asi->insn_singlestep = arm_singlestep;
asi->insn_check_cc = kprobe_condition_checks[insn>>28];
return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false);
}
| gpl-2.0 |
uplusplus/ls300_smdkc110 | drivers/staging/vme/vme.c | 510 | 33921 | /*
* VME Bridge Framework
*
* Author: Martyn Welch <martyn.welch@gefanuc.com>
* Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include "vme.h"
#include "vme_bridge.h"
/* Bitmask and mutex to keep track of bridge numbers */
static unsigned int vme_bus_numbers;
DEFINE_MUTEX(vme_bus_num_mtx);
static void __exit vme_exit (void);
static int __init vme_init (void);
/*
* Find the bridge resource associated with a specific device resource
*/
static struct vme_bridge *dev_to_bridge(struct device *dev)
{
return dev->platform_data;
}
/*
* Find the bridge that the resource is associated with.
*/
static struct vme_bridge *find_bridge(struct vme_resource *resource)
{
/* Get list to search */
switch (resource->type) {
case VME_MASTER:
return list_entry(resource->entry, struct vme_master_resource,
list)->parent;
break;
case VME_SLAVE:
return list_entry(resource->entry, struct vme_slave_resource,
list)->parent;
break;
case VME_DMA:
return list_entry(resource->entry, struct vme_dma_resource,
list)->parent;
break;
case VME_LM:
return list_entry(resource->entry, struct vme_lm_resource,
list)->parent;
break;
default:
printk(KERN_ERR "Unknown resource type\n");
return NULL;
break;
}
}
/*
* Allocate a contiguous block of memory for use by the driver. This is used to
* create the buffers for the slave windows.
*
 * XXX VME bridges could be available on buses other than PCI. At the moment
* this framework only supports PCI devices.
*/
void * vme_alloc_consistent(struct vme_resource *resource, size_t size,
dma_addr_t *dma)
{
struct vme_bridge *bridge;
struct pci_dev *pdev;
if(resource == NULL) {
printk("No resource\n");
return NULL;
}
bridge = find_bridge(resource);
if(bridge == NULL) {
printk("Can't find bridge\n");
return NULL;
}
/* Find pci_dev container of dev */
if (bridge->parent == NULL) {
printk("Dev entry NULL\n");
return NULL;
}
pdev = container_of(bridge->parent, struct pci_dev, dev);
return pci_alloc_consistent(pdev, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
/*
* Free previously allocated contiguous block of memory.
*
 * XXX VME bridges could be available on buses other than PCI. At the moment
* this framework only supports PCI devices.
*/
void vme_free_consistent(struct vme_resource *resource, size_t size,
void *vaddr, dma_addr_t dma)
{
struct vme_bridge *bridge;
struct pci_dev *pdev;
if(resource == NULL) {
printk("No resource\n");
return;
}
bridge = find_bridge(resource);
if(bridge == NULL) {
printk("Can't find bridge\n");
return;
}
/* Find pci_dev container of dev */
pdev = container_of(bridge->parent, struct pci_dev, dev);
pci_free_consistent(pdev, size, vaddr, dma);
}
EXPORT_SYMBOL(vme_free_consistent);
size_t vme_get_size(struct vme_resource *resource)
{
int enabled, retval;
unsigned long long base, size;
dma_addr_t buf_base;
vme_address_t aspace;
vme_cycle_t cycle;
vme_width_t dwidth;
switch (resource->type) {
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
&aspace, &cycle, &dwidth);
return size;
break;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
&buf_base, &aspace, &cycle);
return size;
break;
case VME_DMA:
return 0;
break;
default:
printk(KERN_ERR "Unknown resource type\n");
return 0;
break;
}
}
EXPORT_SYMBOL(vme_get_size);
static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
unsigned long long size)
{
int retval = 0;
switch (aspace) {
case VME_A16:
if (((vme_base + size) > VME_A16_MAX) ||
(vme_base > VME_A16_MAX))
retval = -EFAULT;
break;
case VME_A24:
if (((vme_base + size) > VME_A24_MAX) ||
(vme_base > VME_A24_MAX))
retval = -EFAULT;
break;
case VME_A32:
if (((vme_base + size) > VME_A32_MAX) ||
(vme_base > VME_A32_MAX))
retval = -EFAULT;
break;
case VME_A64:
/*
* Any value held in an unsigned long long can be used as the
* base
*/
break;
case VME_CRCSR:
if (((vme_base + size) > VME_CRCSR_MAX) ||
(vme_base > VME_CRCSR_MAX))
retval = -EFAULT;
break;
case VME_USER1:
case VME_USER2:
case VME_USER3:
case VME_USER4:
/* User Defined */
break;
default:
printk("Invalid address space\n");
retval = -EINVAL;
break;
}
return retval;
}
/*
* Request a slave image with specific attributes, return some unique
* identifier.
*/
struct vme_resource * vme_slave_request(struct device *dev,
vme_address_t address, vme_cycle_t cycle)
{
struct vme_bridge *bridge;
struct list_head *slave_pos = NULL;
struct vme_slave_resource *allocated_image = NULL;
struct vme_slave_resource *slave_image = NULL;
struct vme_resource *resource = NULL;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
/* Loop through slave resources */
list_for_each(slave_pos, &(bridge->slave_resources)) {
slave_image = list_entry(slave_pos,
struct vme_slave_resource, list);
if (slave_image == NULL) {
printk("Registered NULL Slave resource\n");
continue;
}
/* Find an unlocked and compatible image */
mutex_lock(&(slave_image->mtx));
if(((slave_image->address_attr & address) == address) &&
((slave_image->cycle_attr & cycle) == cycle) &&
(slave_image->locked == 0)) {
slave_image->locked = 1;
mutex_unlock(&(slave_image->mtx));
allocated_image = slave_image;
break;
}
mutex_unlock(&(slave_image->mtx));
}
/* No free image */
if (allocated_image == NULL)
goto err_image;
resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
if (resource == NULL) {
printk(KERN_WARNING "Unable to allocate resource structure\n");
goto err_alloc;
}
resource->type = VME_SLAVE;
resource->entry = &(allocated_image->list);
return resource;
err_alloc:
/* Unlock image */
mutex_lock(&(slave_image->mtx));
slave_image->locked = 0;
mutex_unlock(&(slave_image->mtx));
err_image:
err_bus:
return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
int vme_slave_set (struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
int retval;
if (resource->type != VME_SLAVE) {
printk("Not a slave resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_slave_resource, list);
if (bridge->slave_set == NULL) {
printk("Function not supported\n");
return -ENOSYS;
}
if(!(((image->address_attr & aspace) == aspace) &&
((image->cycle_attr & cycle) == cycle))) {
printk("Invalid attributes\n");
return -EINVAL;
}
retval = vme_check_window(aspace, vme_base, size);
if(retval)
return retval;
return bridge->slave_set(image, enabled, vme_base, size, buf_base,
aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get (struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
if (resource->type != VME_SLAVE) {
printk("Not a slave resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_slave_resource, list);
if (bridge->slave_get == NULL) {
printk("vme_slave_get not supported\n");
return -EINVAL;
}
return bridge->slave_get(image, enabled, vme_base, size, buf_base,
aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);
void vme_slave_free(struct vme_resource *resource)
{
struct vme_slave_resource *slave_image;
if (resource->type != VME_SLAVE) {
printk("Not a slave resource\n");
return;
}
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
if (slave_image == NULL) {
printk("Can't find slave resource\n");
return;
}
/* Unlock image */
mutex_lock(&(slave_image->mtx));
if (slave_image->locked == 0)
printk(KERN_ERR "Image is already free\n");
slave_image->locked = 0;
mutex_unlock(&(slave_image->mtx));
/* Free up resource memory */
kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
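/*
 * Rough usage sketch for the slave window API above (illustrative only,
 * not part of the framework): set up an A24 slave window backed by a
 * coherent buffer. The cycle flags VME_SCT, VME_USER and VME_DATA are
 * assumed to be defined in vme.h; the base address and size are
 * arbitrary example values.
 *
 *	struct vme_resource *res;
 *	dma_addr_t buf_base;
 *	void *buf;
 *
 *	res = vme_slave_request(dev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	if (res == NULL)
 *		return -ENOMEM;
 *	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
 *	if (buf == NULL) {
 *		vme_slave_free(res);
 *		return -ENOMEM;
 *	}
 *	if (vme_slave_set(res, 1, 0x00800000, 0x10000, buf_base,
 *			VME_A24, VME_SCT | VME_USER | VME_DATA)) {
 *		vme_free_consistent(res, 0x10000, buf, buf_base);
 *		vme_slave_free(res);
 *		return -EIO;
 *	}
 */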
/*
* Request a master image with specific attributes, return some unique
* identifier.
*/
struct vme_resource * vme_master_request(struct device *dev,
vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
{
struct vme_bridge *bridge;
struct list_head *master_pos = NULL;
struct vme_master_resource *allocated_image = NULL;
struct vme_master_resource *master_image = NULL;
struct vme_resource *resource = NULL;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
/* Loop through master resources */
list_for_each(master_pos, &(bridge->master_resources)) {
master_image = list_entry(master_pos,
struct vme_master_resource, list);
if (master_image == NULL) {
printk(KERN_WARNING "Registered NULL master resource\n");
continue;
}
/* Find an unlocked and compatible image */
spin_lock(&(master_image->lock));
if(((master_image->address_attr & address) == address) &&
((master_image->cycle_attr & cycle) == cycle) &&
((master_image->width_attr & dwidth) == dwidth) &&
(master_image->locked == 0)) {
master_image->locked = 1;
spin_unlock(&(master_image->lock));
allocated_image = master_image;
break;
}
spin_unlock(&(master_image->lock));
}
/* Check to see if we found a resource */
if (allocated_image == NULL) {
printk(KERN_ERR "Can't find a suitable resource\n");
goto err_image;
}
resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
if (resource == NULL) {
printk(KERN_ERR "Unable to allocate resource structure\n");
goto err_alloc;
}
resource->type = VME_MASTER;
resource->entry = &(allocated_image->list);
return resource;
kfree(resource);
err_alloc:
/* Unlock image */
spin_lock(&(master_image->lock));
master_image->locked = 0;
spin_unlock(&(master_image->lock));
err_image:
err_bus:
return NULL;
}
EXPORT_SYMBOL(vme_master_request);
int vme_master_set (struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
int retval;
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
if (bridge->master_set == NULL) {
printk("vme_master_set not supported\n");
return -EINVAL;
}
if(!(((image->address_attr & aspace) == aspace) &&
((image->cycle_attr & cycle) == cycle) &&
((image->width_attr & dwidth) == dwidth))) {
printk("Invalid attributes\n");
return -EINVAL;
}
retval = vme_check_window(aspace, vme_base, size);
if(retval)
return retval;
return bridge->master_set(image, enabled, vme_base, size, aspace,
cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
int vme_master_get (struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
if (bridge->master_get == NULL) {
printk("vme_master_set not supported\n");
return -EINVAL;
}
return bridge->master_get(image, enabled, vme_base, size, aspace,
cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_get);
/*
* Read data out of VME space into a buffer.
*/
ssize_t vme_master_read (struct vme_resource *resource, void *buf, size_t count,
loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
size_t length;
if (bridge->master_read == NULL) {
printk("Reading from resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
length = vme_get_size(resource);
if (offset > length) {
printk("Invalid Offset\n");
return -EFAULT;
}
if ((offset + count) > length)
count = length - offset;
return bridge->master_read(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_read);
/*
* Write data out to VME space from a buffer.
*/
ssize_t vme_master_write (struct vme_resource *resource, void *buf,
size_t count, loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
size_t length;
if (bridge->master_write == NULL) {
printk("Writing to resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
length = vme_get_size(resource);
if (offset > length) {
printk("Invalid Offset\n");
return -EFAULT;
}
if ((offset + count) > length)
count = length - offset;
return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
/*
* Perform RMW cycle to provided location.
*/
unsigned int vme_master_rmw (struct vme_resource *resource, unsigned int mask,
unsigned int compare, unsigned int swap, loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
if (bridge->master_rmw == NULL) {
printk("Writing to resource not supported\n");
return -EINVAL;
}
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return -EINVAL;
}
image = list_entry(resource->entry, struct vme_master_resource, list);
return bridge->master_rmw(image, mask, compare, swap, offset);
}
EXPORT_SYMBOL(vme_master_rmw);
void vme_master_free(struct vme_resource *resource)
{
struct vme_master_resource *master_image;
if (resource->type != VME_MASTER) {
printk("Not a master resource\n");
return;
}
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
if (master_image == NULL) {
printk("Can't find master resource\n");
return;
}
/* Unlock image */
spin_lock(&(master_image->lock));
if (master_image->locked == 0)
printk(KERN_ERR "Image is already free\n");
master_image->locked = 0;
spin_unlock(&(master_image->lock));
/* Free up resource memory */
kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
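/*
 * Example (illustrative sketch only, not compiled): how a slot driver might
 * use the master window API above. The VME_A24/VME_SCT/VME_D16 attribute
 * constants and the vme_master_request() helper defined earlier in this file
 * are assumed from vme.h; check the header for the exact names before reuse.
 */
#if 0
static int example_read_board_id(struct device *dev, u32 *id)
{
struct vme_resource *res;
int retval;
/* Request a master window capable of A24/D16 single cycles */
res = vme_master_request(dev, VME_A24, VME_SCT, VME_D16);
if (res == NULL)
return -ENODEV;
/* Map 64KiB of VME space starting at 0x00C00000 */
retval = vme_master_set(res, 1, 0x00C00000, 0x10000, VME_A24, VME_SCT,
VME_D16);
if (retval)
goto out;
/* Read the first four bytes of the window */
if (vme_master_read(res, id, 4, 0) != 4)
retval = -EIO;
out:
vme_master_free(res);
return retval;
}
#endif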
/*
* Request a DMA controller with specific attributes, return some unique
* identifier.
*/
struct vme_resource *vme_request_dma(struct device *dev)
{
struct vme_bridge *bridge;
struct list_head *dma_pos = NULL;
struct vme_dma_resource *allocated_ctrlr = NULL;
struct vme_dma_resource *dma_ctrlr = NULL;
struct vme_resource *resource = NULL;
/* XXX Not checking resource attributes */
printk(KERN_ERR "No VME resource Attribute tests done\n");
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
/* Loop through DMA resources */
list_for_each(dma_pos, &(bridge->dma_resources)) {
dma_ctrlr = list_entry(dma_pos,
struct vme_dma_resource, list);
if (dma_ctrlr == NULL) {
printk("Registered NULL DMA resource\n");
continue;
}
/* Find an unlocked controller */
mutex_lock(&(dma_ctrlr->mtx));
if(dma_ctrlr->locked == 0) {
dma_ctrlr->locked = 1;
mutex_unlock(&(dma_ctrlr->mtx));
allocated_ctrlr = dma_ctrlr;
break;
}
mutex_unlock(&(dma_ctrlr->mtx));
}
/* Check to see if we found a resource */
if (allocated_ctrlr == NULL)
goto err_ctrlr;
resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
if (resource == NULL) {
printk(KERN_WARNING "Unable to allocate resource structure\n");
goto err_alloc;
}
resource->type = VME_DMA;
resource->entry = &(allocated_ctrlr->list);
return resource;
err_alloc:
/* Unlock image */
mutex_lock(&(dma_ctrlr->mtx));
dma_ctrlr->locked = 0;
mutex_unlock(&(dma_ctrlr->mtx));
err_ctrlr:
err_bus:
return NULL;
}
EXPORT_SYMBOL(vme_request_dma);
/*
* Start new list
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
struct vme_dma_resource *ctrlr;
struct vme_dma_list *dma_list;
if (resource->type != VME_DMA) {
printk("Not a DMA resource\n");
return NULL;
}
ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
dma_list = (struct vme_dma_list *)kmalloc(
sizeof(struct vme_dma_list), GFP_KERNEL);
if(dma_list == NULL) {
printk("Unable to allocate memory for new dma list\n");
return NULL;
}
INIT_LIST_HEAD(&(dma_list->entries));
dma_list->parent = ctrlr;
mutex_init(&(dma_list->mtx));
return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);
/*
* Create "Pattern" type attributes
*/
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
vme_pattern_t type)
{
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
attributes = (struct vme_dma_attr *)kmalloc(
sizeof(struct vme_dma_attr), GFP_KERNEL);
if(attributes == NULL) {
printk("Unable to allocate memory for attributes structure\n");
goto err_attr;
}
pattern_attr = (struct vme_dma_pattern *)kmalloc(
sizeof(struct vme_dma_pattern), GFP_KERNEL);
if(pattern_attr == NULL) {
printk("Unable to allocate memory for pattern attributes\n");
goto err_pat;
}
attributes->type = VME_DMA_PATTERN;
attributes->private = (void *)pattern_attr;
pattern_attr->pattern = pattern;
pattern_attr->type = type;
return attributes;
kfree(pattern_attr);
err_pat:
kfree(attributes);
err_attr:
return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
/*
* Create "PCI" type attributes
*/
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
{
struct vme_dma_attr *attributes;
struct vme_dma_pci *pci_attr;
/* XXX Run some sanity checks here */
attributes = (struct vme_dma_attr *)kmalloc(
sizeof(struct vme_dma_attr), GFP_KERNEL);
if(attributes == NULL) {
printk("Unable to allocate memory for attributes structure\n");
goto err_attr;
}
pci_attr = (struct vme_dma_pci *)kmalloc(sizeof(struct vme_dma_pci),
GFP_KERNEL);
if(pci_attr == NULL) {
printk("Unable to allocate memory for pci attributes\n");
goto err_pci;
}
attributes->type = VME_DMA_PCI;
attributes->private = (void *)pci_attr;
pci_attr->address = address;
return attributes;
kfree(pci_attr);
err_pci:
kfree(attributes);
err_attr:
return NULL;
}
EXPORT_SYMBOL(vme_dma_pci_attribute);
/*
* Create "VME" type attributes
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
/* XXX Run some sanity checks here */
attributes = (struct vme_dma_attr *)kmalloc(
sizeof(struct vme_dma_attr), GFP_KERNEL);
if(attributes == NULL) {
printk("Unable to allocate memory for attributes structure\n");
goto err_attr;
}
vme_attr = (struct vme_dma_vme *)kmalloc(sizeof(struct vme_dma_vme),
GFP_KERNEL);
if(vme_attr == NULL) {
printk("Unable to allocate memory for vme attributes\n");
goto err_vme;
}
attributes->type = VME_DMA_VME;
attributes->private = (void *)vme_attr;
vme_attr->address = address;
vme_attr->aspace = aspace;
vme_attr->cycle = cycle;
vme_attr->dwidth = dwidth;
return attributes;
kfree(vme_attr);
err_vme:
kfree(attributes);
err_attr:
return NULL;
}
EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
* Free attribute
*/
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
kfree(attributes->private);
kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_attr *dest, size_t count)
{
struct vme_bridge *bridge = list->parent->parent;
int retval;
if (bridge->dma_list_add == NULL) {
printk("Link List DMA generation not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&(list->mtx))) {
printk("Link List already submitted\n");
return -EINVAL;
}
retval = bridge->dma_list_add(list, src, dest, count);
mutex_unlock(&(list->mtx));
return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
int vme_dma_list_exec(struct vme_dma_list *list)
{
struct vme_bridge *bridge = list->parent->parent;
int retval;
if (bridge->dma_list_exec == NULL) {
printk("Link List DMA execution not supported\n");
return -EINVAL;
}
mutex_lock(&(list->mtx));
retval = bridge->dma_list_exec(list);
mutex_unlock(&(list->mtx));
return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
int vme_dma_list_free(struct vme_dma_list *list)
{
struct vme_bridge *bridge = list->parent->parent;
int retval;
if (bridge->dma_list_empty == NULL) {
printk("Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&(list->mtx))) {
printk("Link List in use\n");
return -EINVAL;
}
/*
* Empty out all of the entries from the dma list. We need to go to the
* low level driver as dma entries are driver specific.
*/
retval = bridge->dma_list_empty(list);
if (retval) {
printk("Unable to empty link-list entries\n");
mutex_unlock(&(list->mtx));
return retval;
}
mutex_unlock(&(list->mtx));
kfree(list);
return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
int vme_dma_free(struct vme_resource *resource)
{
struct vme_dma_resource *ctrlr;
if (resource->type != VME_DMA) {
printk("Not a DMA resource\n");
return -EINVAL;
}
ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
if (!mutex_trylock(&(ctrlr->mtx))) {
printk("Resource busy, can't free\n");
return -EBUSY;
}
if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
printk("Resource still processing transfers\n");
mutex_unlock(&(ctrlr->mtx));
return -EBUSY;
}
ctrlr->locked = 0;
mutex_unlock(&(ctrlr->mtx));
return 0;
}
EXPORT_SYMBOL(vme_dma_free);
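/*
 * Example (illustrative sketch only, not compiled): a typical call sequence
 * for the DMA list API above - request a controller, build a single-entry
 * PCI-to-VME list, run it and clean up. The VME_A32/VME_SCT/VME_D32
 * attribute constants are assumed from vme.h.
 */
#if 0
static int example_dma_to_vme(struct device *dev, dma_addr_t src_bus,
unsigned long long dst_vme, size_t count)
{
struct vme_resource *res;
struct vme_dma_list *list;
struct vme_dma_attr *src, *dest;
int retval = -ENOMEM;
res = vme_request_dma(dev);
if (res == NULL)
return -ENODEV;
list = vme_new_dma_list(res);
if (list == NULL)
goto out_res;
src = vme_dma_pci_attribute(src_bus);
dest = vme_dma_vme_attribute(dst_vme, VME_A32, VME_SCT, VME_D32);
if ((src == NULL) || (dest == NULL))
goto out_attr;
retval = vme_dma_list_add(list, src, dest, count);
if (retval == 0)
retval = vme_dma_list_exec(list);
out_attr:
if (src)
vme_dma_free_attribute(src);
if (dest)
vme_dma_free_attribute(dest);
vme_dma_list_free(list);
out_res:
vme_dma_free(res);
return retval;
}
#endif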
int vme_request_irq(struct device *dev, int level, int statid,
void (*callback)(int level, int vector, void *priv_data),
void *priv_data)
{
struct vme_bridge *bridge;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
if((level < 1) || (level > 7)) {
printk(KERN_WARNING "Invalid interrupt level\n");
return -EINVAL;
}
if (bridge->request_irq == NULL) {
printk("Registering interrupts not supported\n");
return -EINVAL;
}
return bridge->request_irq(level, statid, callback, priv_data);
}
EXPORT_SYMBOL(vme_request_irq);
void vme_free_irq(struct device *dev, int level, int statid)
{
struct vme_bridge *bridge;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return;
}
if((level < 1) || (level > 7)) {
printk(KERN_WARNING "Invalid interrupt level\n");
return;
}
if (bridge->free_irq == NULL) {
printk("Freeing interrupts not supported\n");
return;
}
bridge->free_irq(level, statid);
}
EXPORT_SYMBOL(vme_free_irq);
int vme_generate_irq(struct device *dev, int level, int statid)
{
struct vme_bridge *bridge;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
if((level < 1) || (level > 7)) {
printk(KERN_WARNING "Invalid interrupt level\n");
return -EINVAL;
}
if (bridge->generate_irq == NULL) {
printk("Interrupt generation not supported\n");
return -EINVAL;
}
return bridge->generate_irq(level, statid);
}
EXPORT_SYMBOL(vme_generate_irq);
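/*
 * Example (illustrative sketch only, not compiled): hooking a handler to
 * VME IRQ level 3 with status/ID 0x20. The callback signature matches the
 * one expected by vme_request_irq() above; the level and status/ID values
 * are placeholders, and struct example_card is purely hypothetical.
 */
#if 0
struct example_card {
struct device *dev;
};
static void example_vme_isr(int level, int vector, void *priv_data)
{
struct example_card *card = priv_data;
/* Service the board, then acknowledge however the hardware requires */
dev_info(card->dev, "VME interrupt: level %d, vector 0x%x\n", level, vector);
}
static int example_hook_irq(struct device *dev, struct example_card *card)
{
card->dev = dev;
return vme_request_irq(dev, 3, 0x20, example_vme_isr, card);
}
#endif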
/*
* Request the location monitor, return resource or NULL
*/
struct vme_resource *vme_lm_request(struct device *dev)
{
struct vme_bridge *bridge;
struct list_head *lm_pos = NULL;
struct vme_lm_resource *allocated_lm = NULL;
struct vme_lm_resource *lm = NULL;
struct vme_resource *resource = NULL;
bridge = dev_to_bridge(dev);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
/* Loop through DMA resources */
list_for_each(lm_pos, &(bridge->lm_resources)) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
if (lm == NULL) {
printk(KERN_ERR "Registered NULL Location Monitor "
"resource\n");
continue;
}
/* Find an unlocked controller */
mutex_lock(&(lm->mtx));
if (lm->locked == 0) {
lm->locked = 1;
mutex_unlock(&(lm->mtx));
allocated_lm = lm;
break;
}
mutex_unlock(&(lm->mtx));
}
/* Check to see if we found a resource */
if (allocated_lm == NULL)
goto err_lm;
resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
if (resource == NULL) {
printk(KERN_ERR "Unable to allocate resource structure\n");
goto err_alloc;
}
resource->type = VME_LM;
resource->entry = &(allocated_lm->list);
return resource;
err_alloc:
/* Unlock image */
mutex_lock(&(lm->mtx));
lm->locked = 0;
mutex_unlock(&(lm->mtx));
err_lm:
err_bus:
return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
int vme_lm_count(struct vme_resource *resource)
{
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
return lm->monitors;
}
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
vme_address_t aspace, vme_cycle_t cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (bridge->lm_set == NULL) {
printk(KERN_ERR "vme_lm_set not supported\n");
return -EINVAL;
}
/* XXX Check parameters */
return lm->parent->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
vme_address_t *aspace, vme_cycle_t *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (bridge->lm_get == NULL) {
printk(KERN_ERR "vme_lm_get not supported\n");
return -EINVAL;
}
return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);
int vme_lm_attach(struct vme_resource *resource, int monitor,
void (*callback)(int))
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (bridge->lm_attach == NULL) {
printk(KERN_ERR "vme_lm_attach not supported\n");
return -EINVAL;
}
return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return -EINVAL;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (bridge->lm_detach == NULL) {
printk(KERN_ERR "vme_lm_detach not supported\n");
return -EINVAL;
}
return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
void vme_lm_free(struct vme_resource *resource)
{
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
if (!mutex_trylock(&(lm->mtx))) {
printk(KERN_ERR "Resource busy, can't free\n");
return;
}
/* XXX Check to see that there aren't any callbacks still attached */
lm->locked = 0;
mutex_unlock(&(lm->mtx));
}
EXPORT_SYMBOL(vme_lm_free);
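/*
 * Example (illustrative sketch only, not compiled): claiming a location
 * monitor, pointing it at an A16 address and attaching a callback to
 * monitor 0. VME_A16 and the VME_SCT/VME_USER/VME_DATA cycle flags are
 * assumed from vme.h - verify the exact names there before reuse.
 */
#if 0
static void example_lm_event(int monitor)
{
pr_info("Location monitor %d triggered\n", monitor);
}
static struct vme_resource *example_setup_lm(struct device *dev)
{
struct vme_resource *lm;
lm = vme_lm_request(dev);
if (lm == NULL)
return NULL;
if (vme_lm_set(lm, 0x6000, VME_A16, VME_SCT | VME_USER | VME_DATA) ||
vme_lm_attach(lm, 0, example_lm_event)) {
vme_lm_free(lm);
return NULL;
}
return lm;
}
#endif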
int vme_slot_get(struct device *bus)
{
struct vme_bridge *bridge;
bridge = dev_to_bridge(bus);
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
if (bridge->slot_get == NULL) {
printk("vme_slot_get not supported\n");
return -EINVAL;
}
return bridge->slot_get();
}
EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */
static int vme_alloc_bus_num(void)
{
int i;
mutex_lock(&vme_bus_num_mtx);
for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
if (((vme_bus_numbers >> i) & 0x1) == 0) {
vme_bus_numbers |= (0x1 << i);
break;
}
}
mutex_unlock(&vme_bus_num_mtx);
return i;
}
static void vme_free_bus_num(int bus)
{
mutex_lock(&vme_bus_num_mtx);
vme_bus_numbers &= ~(0x1 << bus);
mutex_unlock(&vme_bus_num_mtx);
}
int vme_register_bridge (struct vme_bridge *bridge)
{
struct device *dev;
int retval;
int i;
bridge->num = vme_alloc_bus_num();
/* This creates 32 vme "slot" devices. This equates to a slot for each
* ID available in a system conforming to the ANSI/VITA 1-1994
* specification.
*/
for (i = 0; i < VME_SLOTS_MAX; i++) {
dev = &(bridge->dev[i]);
memset(dev, 0, sizeof(struct device));
dev->parent = bridge->parent;
dev->bus = &(vme_bus_type);
/*
* We save a pointer to the bridge in platform_data so that we
* can get to it later. We keep driver_data for use by the
* driver that binds against the slot
*/
dev->platform_data = bridge;
dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);
retval = device_register(dev);
if(retval)
goto err_reg;
}
return retval;
err_reg:
/* Unwind only the slot devices that registered successfully */
while (--i >= 0) {
dev = &(bridge->dev[i]);
device_unregister(dev);
}
vme_free_bus_num(bridge->num);
return retval;
}
EXPORT_SYMBOL(vme_register_bridge);
void vme_unregister_bridge (struct vme_bridge *bridge)
{
int i;
struct device *dev;
for (i = 0; i < VME_SLOTS_MAX; i++) {
dev = &(bridge->dev[i]);
device_unregister(dev);
}
vme_free_bus_num(bridge->num);
}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
int vme_register_driver (struct vme_driver *drv)
{
drv->driver.name = drv->name;
drv->driver.bus = &vme_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(vme_register_driver);
void vme_unregister_driver (struct vme_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
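/*
 * Example (illustrative sketch only, not compiled): a minimal driver binding
 * against this framework. The bind-table entry type is assumed to be
 * struct vme_device_id (bus/slot pairs, zero terminated) and VME_SLOT_ALL to
 * be the "any slot" wildcard - verify both against vme.h before reuse.
 */
#if 0
static int example_probe(struct device *dev, int bus, int slot)
{
dev_info(dev, "probed on VME bus %d, slot %d\n", bus, slot);
return 0;
}
static int example_remove(struct device *dev, int bus, int slot)
{
return 0;
}
static struct vme_device_id example_ids[] = {
{ .bus = 0, .slot = VME_SLOT_ALL }, /* every slot on the first bus */
{ }, /* zero terminator expected by vme_bus_match() */
};
static struct vme_driver example_driver = {
.name = "example_vme",
.bind_table = example_ids,
.probe = example_probe,
.remove = example_remove,
};
/* A module init function would then call vme_register_driver(&example_driver) */
#endif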
/* - Bus Registration ------------------------------------------------------ */
int vme_calc_slot(struct device *dev)
{
struct vme_bridge *bridge;
int num;
bridge = dev_to_bridge(dev);
/* Determine slot number */
num = 0;
while(num < VME_SLOTS_MAX) {
if(&(bridge->dev[num]) == dev) {
break;
}
num++;
}
if (num == VME_SLOTS_MAX) {
dev_err(dev, "Failed to identify slot\n");
num = 0;
goto err_dev;
}
num++;
err_dev:
return num;
}
static struct vme_driver *dev_to_vme_driver(struct device *dev)
{
if(dev->driver == NULL)
printk("Bugger dev->driver is NULL\n");
return container_of(dev->driver, struct vme_driver, driver);
}
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
struct vme_bridge *bridge;
struct vme_driver *driver;
int i, num;
bridge = dev_to_bridge(dev);
driver = container_of(drv, struct vme_driver, driver);
num = vme_calc_slot(dev);
if (!num)
goto err_dev;
if (driver->bind_table == NULL) {
dev_err(dev, "Bind table NULL\n");
goto err_table;
}
i = 0;
while((driver->bind_table[i].bus != 0) ||
(driver->bind_table[i].slot != 0)) {
if (bridge->num == driver->bind_table[i].bus) {
if (num == driver->bind_table[i].slot)
return 1;
if (driver->bind_table[i].slot == VME_SLOT_ALL)
return 1;
if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
(num == vme_slot_get(dev)))
return 1;
}
i++;
}
err_dev:
err_table:
return 0;
}
static int vme_bus_probe(struct device *dev)
{
struct vme_bridge *bridge;
struct vme_driver *driver;
int retval = -ENODEV;
driver = dev_to_vme_driver(dev);
bridge = dev_to_bridge(dev);
if(driver->probe != NULL) {
retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
}
return retval;
}
static int vme_bus_remove(struct device *dev)
{
struct vme_bridge *bridge;
struct vme_driver *driver;
int retval = -ENODEV;
driver = dev_to_vme_driver(dev);
bridge = dev_to_bridge(dev);
if(driver->remove != NULL) {
retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
}
return retval;
}
struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
static int __init vme_init (void)
{
return bus_register(&vme_bus_type);
}
static void __exit vme_exit (void)
{
bus_unregister(&vme_bus_type);
}
MODULE_DESCRIPTION("VME bridge driver framework");
MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com");
MODULE_LICENSE("GPL");
module_init(vme_init);
module_exit(vme_exit);
| gpl-2.0 |
SM-G920P/TeamSPR_Kernel_OLD | drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c | 2302 | 12376 | /*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
* Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mxl111sf-tuner.h"
#include "mxl111sf-phy.h"
#include "mxl111sf-reg.h"
/* debug */
static int mxl111sf_tuner_debug;
module_param_named(debug, mxl111sf_tuner_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
#define mxl_dbg(fmt, arg...) \
if (mxl111sf_tuner_debug) \
mxl_printk(KERN_DEBUG, fmt, ##arg)
#define err pr_err
/* ------------------------------------------------------------------------ */
struct mxl111sf_tuner_state {
struct mxl111sf_state *mxl_state;
struct mxl111sf_tuner_config *cfg;
enum mxl_if_freq if_freq;
u32 frequency;
u32 bandwidth;
};
static int mxl111sf_tuner_read_reg(struct mxl111sf_tuner_state *state,
u8 addr, u8 *data)
{
return (state->cfg->read_reg) ?
state->cfg->read_reg(state->mxl_state, addr, data) :
-EINVAL;
}
static int mxl111sf_tuner_write_reg(struct mxl111sf_tuner_state *state,
u8 addr, u8 data)
{
return (state->cfg->write_reg) ?
state->cfg->write_reg(state->mxl_state, addr, data) :
-EINVAL;
}
static int mxl111sf_tuner_program_regs(struct mxl111sf_tuner_state *state,
struct mxl111sf_reg_ctrl_info *ctrl_reg_info)
{
return (state->cfg->program_regs) ?
state->cfg->program_regs(state->mxl_state, ctrl_reg_info) :
-EINVAL;
}
static int mxl1x1sf_tuner_top_master_ctrl(struct mxl111sf_tuner_state *state,
int onoff)
{
return (state->cfg->top_master_ctrl) ?
state->cfg->top_master_ctrl(state->mxl_state, onoff) :
-EINVAL;
}
/* ------------------------------------------------------------------------ */
static struct mxl111sf_reg_ctrl_info mxl_phy_tune_rf[] = {
{0x1d, 0x7f, 0x00}, /* channel bandwidth section 1/2/3,
DIG_MODEINDEX, _A, _CSF, */
{0x1e, 0xff, 0x00}, /* channel frequency (lo and fractional) */
{0x1f, 0xff, 0x00}, /* channel frequency (hi for integer portion) */
{0, 0, 0}
};
/* ------------------------------------------------------------------------ */
static struct mxl111sf_reg_ctrl_info *mxl111sf_calc_phy_tune_regs(u32 freq,
u8 bw)
{
u8 filt_bw;
/* set channel bandwidth */
switch (bw) {
case 0: /* ATSC */
filt_bw = 25;
break;
case 1: /* QAM */
filt_bw = 69;
break;
case 6:
filt_bw = 21;
break;
case 7:
filt_bw = 42;
break;
case 8:
filt_bw = 63;
break;
default:
err("%s: invalid bandwidth setting!", __func__);
return NULL;
}
/* calculate RF channel */
freq /= 1000000;
freq *= 64;
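/*
 * Example: a 602 MHz request becomes 602 * 64 = 38528 = 0x9680 in 1/64 MHz
 * steps, so 0x80 is written to the low frequency byte and 0x96 to the high
 * byte below.
 */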
#if 0
/* do round */
freq += 0.5;
#endif
/* set bandwidth */
mxl_phy_tune_rf[0].data = filt_bw;
/* set RF */
mxl_phy_tune_rf[1].data = (freq & 0xff);
mxl_phy_tune_rf[2].data = (freq >> 8) & 0xff;
/* start tune */
return mxl_phy_tune_rf;
}
static int mxl1x1sf_tuner_set_if_output_freq(struct mxl111sf_tuner_state *state)
{
int ret;
u8 ctrl;
#if 0
u16 iffcw;
u32 if_freq;
#endif
mxl_dbg("(IF polarity = %d, IF freq = 0x%02x)",
state->cfg->invert_spectrum, state->cfg->if_freq);
/* set IF polarity */
ctrl = state->cfg->invert_spectrum;
ctrl |= state->cfg->if_freq;
ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_SEL_REG, ctrl);
if (mxl_fail(ret))
goto fail;
#if 0
if_freq /= 1000000;
/* do round */
if_freq += 0.5;
if (MXL_IF_LO == state->cfg->if_freq) {
ctrl = 0x08;
iffcw = (u16)(if_freq / (108 * 4096));
} else if (MXL_IF_HI == state->cfg->if_freq) {
ctrl = 0x08;
iffcw = (u16)(if_freq / (216 * 4096));
} else {
ctrl = 0;
iffcw = 0;
}
ctrl |= (iffcw >> 8);
#endif
ret = mxl111sf_tuner_read_reg(state, V6_TUNER_IF_FCW_BYP_REG, &ctrl);
if (mxl_fail(ret))
goto fail;
ctrl &= 0xf0;
ctrl |= 0x90;
ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_FCW_BYP_REG, ctrl);
if (mxl_fail(ret))
goto fail;
#if 0
ctrl = iffcw & 0x00ff;
#endif
ret = mxl111sf_tuner_write_reg(state, V6_TUNER_IF_FCW_REG, ctrl);
if (mxl_fail(ret))
goto fail;
state->if_freq = state->cfg->if_freq;
fail:
return ret;
}
static int mxl1x1sf_tune_rf(struct dvb_frontend *fe, u32 freq, u8 bw)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
static struct mxl111sf_reg_ctrl_info *reg_ctrl_array;
int ret;
u8 mxl_mode;
mxl_dbg("(freq = %d, bw = 0x%x)", freq, bw);
/* stop tune */
ret = mxl111sf_tuner_write_reg(state, START_TUNE_REG, 0);
if (mxl_fail(ret))
goto fail;
/* check device mode */
ret = mxl111sf_tuner_read_reg(state, MXL_MODE_REG, &mxl_mode);
if (mxl_fail(ret))
goto fail;
/* Fill out registers for channel tune */
reg_ctrl_array = mxl111sf_calc_phy_tune_regs(freq, bw);
if (!reg_ctrl_array)
return -EINVAL;
ret = mxl111sf_tuner_program_regs(state, reg_ctrl_array);
if (mxl_fail(ret))
goto fail;
if ((mxl_mode & MXL_DEV_MODE_MASK) == MXL_TUNER_MODE) {
/* IF tuner mode only */
mxl1x1sf_tuner_top_master_ctrl(state, 0);
mxl1x1sf_tuner_top_master_ctrl(state, 1);
mxl1x1sf_tuner_set_if_output_freq(state);
}
ret = mxl111sf_tuner_write_reg(state, START_TUNE_REG, 1);
if (mxl_fail(ret))
goto fail;
if (state->cfg->ant_hunt)
state->cfg->ant_hunt(fe);
fail:
return ret;
}
static int mxl1x1sf_tuner_get_lock_status(struct mxl111sf_tuner_state *state,
int *rf_synth_lock,
int *ref_synth_lock)
{
int ret;
u8 data;
*rf_synth_lock = 0;
*ref_synth_lock = 0;
ret = mxl111sf_tuner_read_reg(state, V6_RF_LOCK_STATUS_REG, &data);
if (mxl_fail(ret))
goto fail;
*ref_synth_lock = ((data & 0x03) == 0x03) ? 1 : 0;
*rf_synth_lock = ((data & 0x0c) == 0x0c) ? 1 : 0;
fail:
return ret;
}
#if 0
static int mxl1x1sf_tuner_loop_thru_ctrl(struct mxl111sf_tuner_state *state,
int onoff)
{
return mxl111sf_tuner_write_reg(state, V6_TUNER_LOOP_THRU_CTRL_REG,
onoff ? 1 : 0);
}
#endif
/* ------------------------------------------------------------------------ */
static int mxl111sf_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 delsys = c->delivery_system;
struct mxl111sf_tuner_state *state = fe->tuner_priv;
int ret;
u8 bw;
mxl_dbg("()");
switch (delsys) {
case SYS_ATSC:
case SYS_ATSCMH:
bw = 0; /* ATSC */
break;
case SYS_DVBC_ANNEX_B:
bw = 1; /* US CABLE */
break;
case SYS_DVBT:
switch (c->bandwidth_hz) {
case 6000000:
bw = 6;
break;
case 7000000:
bw = 7;
break;
case 8000000:
bw = 8;
break;
default:
err("%s: bandwidth not set!", __func__);
return -EINVAL;
}
break;
default:
err("%s: modulation type not supported!", __func__);
return -EINVAL;
}
ret = mxl1x1sf_tune_rf(fe, c->frequency, bw);
if (mxl_fail(ret))
goto fail;
state->frequency = c->frequency;
state->bandwidth = c->bandwidth_hz;
fail:
return ret;
}
/* ------------------------------------------------------------------------ */
#if 0
static int mxl111sf_tuner_init(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
int ret;
/* wake from standby handled by usb driver */
return ret;
}
static int mxl111sf_tuner_sleep(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
int ret;
/* enter standby mode handled by usb driver */
return ret;
}
#endif
/* ------------------------------------------------------------------------ */
static int mxl111sf_tuner_get_status(struct dvb_frontend *fe, u32 *status)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
int rf_locked, ref_locked, ret;
*status = 0;
ret = mxl1x1sf_tuner_get_lock_status(state, &rf_locked, &ref_locked);
if (mxl_fail(ret))
goto fail;
mxl_info("%s%s", rf_locked ? "rf locked " : "",
ref_locked ? "ref locked" : "");
if ((rf_locked) || (ref_locked))
*status |= TUNER_STATUS_LOCKED;
fail:
return ret;
}
static int mxl111sf_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
u8 val1, val2;
int ret;
*strength = 0;
ret = mxl111sf_tuner_write_reg(state, 0x00, 0x02);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_tuner_read_reg(state, V6_DIG_RF_PWR_LSB_REG, &val1);
if (mxl_fail(ret))
goto fail;
ret = mxl111sf_tuner_read_reg(state, V6_DIG_RF_PWR_MSB_REG, &val2);
if (mxl_fail(ret))
goto fail;
*strength = val1 | ((val2 & 0x07) << 8);
fail:
ret = mxl111sf_tuner_write_reg(state, 0x00, 0x00);
mxl_fail(ret);
return ret;
}
/* ------------------------------------------------------------------------ */
static int mxl111sf_tuner_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
*frequency = state->frequency;
return 0;
}
static int mxl111sf_tuner_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
*bandwidth = state->bandwidth;
return 0;
}
static int mxl111sf_tuner_get_if_frequency(struct dvb_frontend *fe,
u32 *frequency)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
*frequency = 0;
switch (state->if_freq) {
case MXL_IF_4_0: /* 4.0 MHz */
*frequency = 4000000;
break;
case MXL_IF_4_5: /* 4.5 MHz */
*frequency = 4500000;
break;
case MXL_IF_4_57: /* 4.57 MHz */
*frequency = 4570000;
break;
case MXL_IF_5_0: /* 5.0 MHz */
*frequency = 5000000;
break;
case MXL_IF_5_38: /* 5.38 MHz */
*frequency = 5380000;
break;
case MXL_IF_6_0: /* 6.0 MHz */
*frequency = 6000000;
break;
case MXL_IF_6_28: /* 6.28 MHz */
*frequency = 6280000;
break;
case MXL_IF_7_2: /* 7.2 MHz */
*frequency = 7200000;
break;
case MXL_IF_35_25: /* 35.25 MHz */
*frequency = 35250000;
break;
case MXL_IF_36: /* 36 MHz */
*frequency = 36000000;
break;
case MXL_IF_36_15: /* 36.15 MHz */
*frequency = 36150000;
break;
case MXL_IF_44: /* 44 MHz */
*frequency = 44000000;
break;
}
return 0;
}
static int mxl111sf_tuner_release(struct dvb_frontend *fe)
{
struct mxl111sf_tuner_state *state = fe->tuner_priv;
mxl_dbg("()");
kfree(state);
fe->tuner_priv = NULL;
return 0;
}
/* ------------------------------------------------------------------------- */
static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
.info = {
.name = "MaxLinear MxL111SF",
#if 0
.frequency_min = ,
.frequency_max = ,
.frequency_step = ,
#endif
},
#if 0
.init = mxl111sf_tuner_init,
.sleep = mxl111sf_tuner_sleep,
#endif
.set_params = mxl111sf_tuner_set_params,
.get_status = mxl111sf_tuner_get_status,
.get_rf_strength = mxl111sf_get_rf_strength,
.get_frequency = mxl111sf_tuner_get_frequency,
.get_bandwidth = mxl111sf_tuner_get_bandwidth,
.get_if_frequency = mxl111sf_tuner_get_if_frequency,
.release = mxl111sf_tuner_release,
};
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
struct mxl111sf_tuner_state *state = NULL;
mxl_dbg("()");
state = kzalloc(sizeof(struct mxl111sf_tuner_state), GFP_KERNEL);
if (state == NULL)
return NULL;
state->mxl_state = mxl_state;
state->cfg = cfg;
memcpy(&fe->ops.tuner_ops, &mxl111sf_tuner_tuner_ops,
sizeof(struct dvb_tuner_ops));
fe->tuner_priv = state;
return fe;
}
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | drivers/net/ethernet/packetengines/yellowfin.c | 2302 | 45672 | /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
Written 1997-2001 by Donald Becker.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
It also supports the Symbios Logic version of the same chip core.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Support and updates available at
http://www.scyld.com/network/yellowfin.html
[link no longer provides useful info -jgarzik]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "yellowfin"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sep 11, 2006"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263; /* Constrained by errata */
static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
#elif defined(YF_NEW) /* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263; /* Constrained by errata */
static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
#endif
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;
/* Used to pass the media type, etc.
No media types are currently defined. These exist for driver
interoperability.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for efficiency.
Making the Tx ring too long decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE 64
#define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#define yellowfin_debug debug
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
static const char version[] =
KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
/*
Theory of Operation
I. Board Compatibility
This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.
II. Board-specific settings
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.
III. Driver operation
IIIa. Ring buffers
The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip. This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.
The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information). For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'yp->tx_full' flag.
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!
IVb. References
Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
IVc. Errata
See Packet Engines confidential appendix (prototype chips only).
*/
enum capability_flags {
HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
HasMACAddrBug=32, /* Only on early revs. */
DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */
};
/* The PCI I/O space extent. */
enum {
YELLOWFIN_SIZE = 0x100,
};
struct pci_id_info {
const char *name;
struct match_info {
int pci, pci_mask, subsystem, subsystem_mask;
int revision, revision_mask; /* Only 8 bits. */
} id;
int drv_flags; /* Driver use, intended as capability flags. */
};
static const struct pci_id_info pci_id_tbl[] = {
{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
HasMII | DontUseEeprom },
{ }
};
static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
/* Offsets to the Yellowfin registers. Various sizes and alignments. */
enum yellowfin_offsets {
TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
MII_Status=0xAE,
RxDepth=0xB8, FlowCtrl=0xBC,
AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
EEFeature=0xF5,
};
/* The Yellowfin Rx and Tx buffer descriptors.
Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
__le32 dbdma_cmd;
__le32 addr;
__le32 branch_addr;
__le32 result_status;
};
struct tx_status_words {
#ifdef __BIG_ENDIAN
u16 tx_errs;
u16 tx_cnt;
u16 paused;
u16 total_tx_cnt;
#else /* Little endian chips. */
u16 tx_cnt;
u16 tx_errs;
u16 total_tx_cnt;
u16 paused;
#endif /* __BIG_ENDIAN */
};
/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
CMD_NOP=0x60000000, CMD_STOP=0x70000000,
BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
BRANCH_IFTRUE=0x040000,
};
/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
IntrEarlyRx=0x100, IntrWakeup=0x200, };
#define PRIV_ALIGN 31 /* Required alignment mask */
#define MII_CNT 4
struct yellowfin_private {
/* Descriptor rings first for alignment.
Tx requires a second descriptor for status. */
struct yellowfin_desc *rx_ring;
struct yellowfin_desc *tx_ring;
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
struct tx_status_words *tx_status;
dma_addr_t tx_status_dma;
struct timer_list timer; /* Media selection timer. */
/* Frequently used and paired value: keep adjacent for cache effect. */
int chip_id, drv_flags;
struct pci_dev *pci_dev;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct tx_status_words *tx_tail_desc;
unsigned int cur_tx, dirty_tx;
int tx_threshold;
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int duplex_lock:1;
unsigned int medialock:1; /* Do not sense media. */
unsigned int default_port:4; /* Last dev->if_port value. */
/* MII transceiver section. */
int mii_cnt; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
spinlock_t lock;
void __iomem *base;
};
static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
static const struct net_device_ops netdev_ops = {
.ndo_open = yellowfin_open,
.ndo_stop = yellowfin_close,
.ndo_start_xmit = yellowfin_start_xmit,
.ndo_set_rx_mode = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout,
};
static int yellowfin_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct yellowfin_private *np;
int irq;
int chip_idx = ent->driver_data;
static int find_cnt;
void __iomem *ioaddr;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
int drv_flags = pci_id_tbl[chip_idx].drv_flags;
void *ring_space;
dma_addr_t ring_dma;
#ifdef USE_IO_OPS
int bar = 0;
#else
int bar = 1;
#endif
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
static int printed_version;
if (!printed_version++)
printk(version);
#endif
i = pci_enable_device(pdev);
if (i) return i;
dev = alloc_etherdev(sizeof(*np));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
np = netdev_priv(dev);
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_free_netdev;
pci_set_master (pdev);
ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
if (!ioaddr)
goto err_out_free_res;
irq = pdev->irq;
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
pci_set_drvdata(pdev, dev);
spin_lock_init(&np->lock);
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->drv_flags = drv_flags;
np->base = ioaddr;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_rx;
np->tx_status = ring_space;
np->tx_status_dma = ring_dma;
if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
if (option > 0) {
if (option & 0x200)
np->full_duplex = 1;
np->default_port = option & 15;
if (np->default_port)
np->medialock = 1;
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->full_duplex = 1;
if (np->full_duplex)
np->duplex_lock = 1;
/* The Yellowfin-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
SET_ETHTOOL_OPS(dev, &ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
i = register_netdev(dev);
if (i)
goto err_out_unmap_status;
netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
pci_id_tbl[chip_idx].name,
ioread32(ioaddr + ChipRev), ioaddr,
dev->dev_addr, irq);
if (np->drv_flags & HasMII) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
int mii_status = mdio_read(ioaddr, phy, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phys[phy_idx++] = phy;
np->advertising = mdio_read(ioaddr, phy, 4);
netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
phy, mii_status, np->advertising);
}
}
np->mii_cnt = phy_idx;
}
find_cnt++;
return 0;
err_out_unmap_status:
pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
np->tx_status_dma);
err_out_unmap_rx:
pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev (dev);
return -ENODEV;
}
static int read_eeprom(void __iomem *ioaddr, int location)
{
int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
iowrite8(location, ioaddr + EEAddr);
iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
;
return ioread8(ioaddr + EERead);
}
/* MII Management Data I/O accesses.
These routines assume the MDIO controller is idle, and do not exit until
the command is finished. */
static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
int i;
iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
iowrite16(1, ioaddr + MII_Cmd);
for (i = 10000; i >= 0; i--)
if ((ioread16(ioaddr + MII_Status) & 1) == 0)
break;
return ioread16(ioaddr + MII_Rd_Data);
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
int i;
iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
iowrite16(value, ioaddr + MII_Wr_Data);
/* Wait for the command to finish. */
for (i = 10000; i >= 0; i--)
if ((ioread16(ioaddr + MII_Status) & 1) == 0)
break;
}
static int yellowfin_open(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
const int irq = yp->pci_dev->irq;
void __iomem *ioaddr = yp->base;
int i, rc;
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
rc = yellowfin_init_ring(dev);
if (rc < 0)
goto err_free_irq;
iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
for (i = 0; i < 6; i++)
iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
/* Set up various condition 'select' registers.
There are no options here. */
iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
/* Initialize other registers: with so many, this will eventually be
converted to an offset/value list. */
iowrite32(dma_ctrl, ioaddr + DMACtrl);
iowrite16(fifo_cfg, ioaddr + FIFOcfg);
/* Enable automatic generation of flow control frames, period 0xffff. */
iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
yp->tx_threshold = 32;
iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
if (dev->if_port == 0)
dev->if_port = yp->default_port;
netif_start_queue(dev);
/* Setting the Rx mode will start the Rx process. */
if (yp->drv_flags & IsGigabit) {
/* We are always in full-duplex mode with gigabit! */
yp->full_duplex = 1;
iowrite16(0x01CF, ioaddr + Cnfg);
} else {
iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
iowrite16(0x1018, ioaddr + FrameGap1);
iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
}
set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
iowrite32(0x80008000, ioaddr + TxCtrl);
if (yellowfin_debug > 2) {
netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
}
/* Set the timer to check for link beat. */
init_timer(&yp->timer);
yp->timer.expires = jiffies + 3*HZ;
yp->timer.data = (unsigned long)dev;
yp->timer.function = yellowfin_timer; /* timer handler */
add_timer(&yp->timer);
out:
return rc;
err_free_irq:
free_irq(irq, dev);
goto out;
}
static void yellowfin_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
if (yellowfin_debug > 3) {
netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
ioread16(ioaddr + IntrStatus));
}
if (yp->mii_cnt) {
int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
int negotiated = lpa & yp->advertising;
if (yellowfin_debug > 1)
netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
yp->phys[0], bmsr, lpa);
yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
if (bmsr & BMSR_LSTATUS)
next_tick = 60*HZ;
else
next_tick = 3*HZ;
}
yp->timer.expires = jiffies + next_tick;
add_timer(&yp->timer);
}
static void yellowfin_tx_timeout(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
yp->cur_tx, yp->dirty_tx,
ioread32(ioaddr + TxStatus),
ioread32(ioaddr + RxStatus));
/* Note: these should be KERN_DEBUG. */
if (yellowfin_debug) {
int i;
pr_warning(" Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
pr_cont(" %08x", yp->rx_ring[i].result_status);
pr_cont("\n");
pr_warning(" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
pr_cont(" %04x /%08x",
yp->tx_status[i].tx_errs,
yp->tx_ring[i].result_status);
pr_cont("\n");
}
/* If the hardware is found to hang regularly, we will update the code
to reinitialize the chip here. */
dev->if_port = 0;
/* Wake the potentially-idle transmit channel. */
iowrite32(0x10001000, yp->base + TxCtrl);
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int yellowfin_init_ring(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
int i, j;
yp->tx_full = 0;
yp->cur_rx = yp->cur_tx = 0;
yp->dirty_tx = 0;
yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
for (i = 0; i < RX_RING_SIZE; i++) {
yp->rx_ring[i].dbdma_cmd =
cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
}
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
yp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
for (j = 0; j < i; j++)
dev_kfree_skb(yp->rx_skbuff[j]);
return -ENOMEM;
}
yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
#define NO_TXSTATS
#ifdef NO_TXSTATS
/* In this mode the Tx ring needs only a single descriptor. */
for (i = 0; i < TX_RING_SIZE; i++) {
yp->tx_skbuff[i] = NULL;
yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
}
/* Wrap ring */
yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
{
/* Tx ring needs a pair of descriptors, the second for the status. */
for (i = 0; i < TX_RING_SIZE; i++) {
j = 2*i;
yp->tx_skbuff[i] = 0;
/* Branch on Tx error. */
yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
(j+1)*sizeof(struct yellowfin_desc));
j++;
if (yp->flags & FullTxStatus) {
yp->tx_ring[j].dbdma_cmd =
cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
i*sizeof(struct tx_status_words));
} else {
/* Symbios chips write only tx_errs word. */
yp->tx_ring[j].dbdma_cmd =
cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
yp->tx_ring[j].request_cnt = 2;
/* Om pade ummmmm... */
yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
i*sizeof(struct tx_status_words) +
&(yp->tx_status[0].tx_errs) -
&(yp->tx_status[0]));
}
yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
}
/* Wrap ring */
yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
yp->tx_tail_desc = &yp->tx_status[0];
return 0;
}
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
unsigned entry;
int len = skb->len;
netif_stop_queue (dev);
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
/* Calculate the next Tx descriptor entry. */
entry = yp->cur_tx % TX_RING_SIZE;
if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
/* Fix GX chipset errata. */
if (cacheline_end > 24 || cacheline_end == 0) {
len = skb->len + 32 - cacheline_end + 1;
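/* e.g. a frame ending at byte 30 of a cache line is padded by
32 - 30 + 1 = 3 bytes, so it finishes just past the 32-byte boundary */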
if (skb_padto(skb, len)) {
yp->tx_skbuff[entry] = NULL;
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
}
}
yp->tx_skbuff[entry] = skb;
#ifdef NO_TXSTATS
yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, len, PCI_DMA_TODEVICE));
yp->tx_ring[entry].result_status = 0;
if (entry >= TX_RING_SIZE-1) {
/* New stop command. */
yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
} else {
yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[entry].dbdma_cmd =
cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
}
yp->cur_tx++;
#else
yp->tx_ring[entry<<1].request_cnt = len;
yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, len, PCI_DMA_TODEVICE));
/* The input_last (status-write) command is constant, but we must
rewrite the subsequent 'stop' command. */
yp->cur_tx++;
{
unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
}
/* Final step -- overwrite the old 'stop' command. */
yp->tx_ring[entry<<1].dbdma_cmd =
cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel. */
iowrite32(0x10001000, yp->base + TxCtrl);
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_start_queue (dev); /* Typical path */
else
yp->tx_full = 1;
if (yellowfin_debug > 4) {
netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
yp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct yellowfin_private *yp;
void __iomem *ioaddr;
int boguscnt = max_interrupt_work;
unsigned int handled = 0;
yp = netdev_priv(dev);
ioaddr = yp->base;
spin_lock (&yp->lock);
do {
u16 intr_status = ioread16(ioaddr + IntrClear);
if (yellowfin_debug > 4)
netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
intr_status);
if (intr_status == 0)
break;
handled = 1;
if (intr_status & (IntrRxDone | IntrEarlyRx)) {
yellowfin_rx(dev);
iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
}
#ifdef NO_TXSTATS
for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
int entry = yp->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (yp->tx_ring[entry].result_status == 0)
break;
skb = yp->tx_skbuff[entry];
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
/* Free the original skb. */
pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
yp->tx_skbuff[entry] = NULL;
}
if (yp->tx_full &&
yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
/* The ring is no longer full, clear tbusy. */
yp->tx_full = 0;
netif_wake_queue(dev);
}
#else
if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
unsigned dirty_tx = yp->dirty_tx;
for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
dirty_tx++) {
/* Todo: optimize this. */
int entry = dirty_tx % TX_RING_SIZE;
u16 tx_errs = yp->tx_status[entry].tx_errs;
struct sk_buff *skb;
#ifndef final_version
if (yellowfin_debug > 5)
netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
entry,
yp->tx_status[entry].tx_cnt,
yp->tx_status[entry].tx_errs,
yp->tx_status[entry].total_tx_cnt,
yp->tx_status[entry].paused);
#endif
if (tx_errs == 0)
break; /* It still hasn't been Txed */
skb = yp->tx_skbuff[entry];
if (tx_errs & 0xF810) {
/* There was an major error, log it. */
#ifndef final_version
if (yellowfin_debug > 1)
netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
tx_errs);
#endif
dev->stats.tx_errors++;
if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
} else {
#ifndef final_version
if (yellowfin_debug > 4)
netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
tx_errs);
#endif
dev->stats.tx_bytes += skb->len;
dev->stats.collisions += tx_errs & 15;
dev->stats.tx_packets++;
}
/* Free the original skb. */
pci_unmap_single(yp->pci_dev,
yp->tx_ring[entry<<1].addr, skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
yp->tx_skbuff[entry] = NULL;
/* Mark status as empty. */
yp->tx_status[entry].tx_errs = 0;
}
#ifndef final_version
if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
dirty_tx, yp->cur_tx, yp->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
if (yp->tx_full &&
yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
/* The ring is no longer full, clear tbusy. */
yp->tx_full = 0;
netif_wake_queue(dev);
}
yp->dirty_tx = dirty_tx;
yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
}
#endif
/* Log errors and other uncommon events. */
if (intr_status & 0x2ee) /* Abnormal error summary. */
yellowfin_error(dev, intr_status);
if (--boguscnt < 0) {
netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
intr_status);
break;
}
} while (1);
if (yellowfin_debug > 3)
netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
ioread16(ioaddr + IntrStatus));
spin_unlock (&yp->lock);
return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int yellowfin_rx(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
int entry = yp->cur_rx % RX_RING_SIZE;
int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
if (yellowfin_debug > 4) {
printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
entry, yp->rx_ring[entry].result_status);
printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
yp->rx_ring[entry].result_status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
while (1) {
struct yellowfin_desc *desc = &yp->rx_ring[entry];
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
int data_size;
u8 *buf_addr;
if (!desc->result_status)
break;
pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
desc_status = le32_to_cpu(desc->result_status) >> 16;
buf_addr = rx_skb->data;
data_size = (le32_to_cpu(desc->dbdma_cmd) -
le32_to_cpu(desc->result_status)) & 0xffff;
frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
if (yellowfin_debug > 4)
printk(KERN_DEBUG " %s() status was %04x\n",
__func__, frame_status);
if (--boguscnt < 0)
break;
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
desc_status, data_size);
dev->stats.rx_length_errors++;
} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
/* There was a error. */
if (yellowfin_debug > 3)
printk(KERN_DEBUG " %s() Rx error was %04x\n",
__func__, frame_status);
dev->stats.rx_errors++;
if (frame_status & 0x0060) dev->stats.rx_length_errors++;
if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
if (frame_status < 0) dev->stats.rx_dropped++;
} else if ( !(yp->drv_flags & IsGigabit) &&
((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
u8 status1 = buf_addr[data_size-2];
u8 status2 = buf_addr[data_size-1];
dev->stats.rx_errors++;
if (status1 & 0xC0) dev->stats.rx_length_errors++;
if (status2 & 0x03) dev->stats.rx_frame_errors++;
if (status2 & 0x04) dev->stats.rx_crc_errors++;
if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
} else if ((yp->flags & HasMACAddrBug) &&
memcmp(le32_to_cpu(yp->rx_ring_dma +
entry*sizeof(struct yellowfin_desc)),
dev->dev_addr, 6) != 0 &&
memcmp(le32_to_cpu(yp->rx_ring_dma +
entry*sizeof(struct yellowfin_desc)),
"\377\377\377\377\377\377", 6) != 0) {
if (bogus_rx++ == 0)
netdev_warn(dev, "Bad frame to %pM\n",
buf_addr);
#endif
} else {
struct sk_buff *skb;
int pkt_len = data_size -
(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
/* To verify: Yellowfin Length should omit the CRC! */
#ifndef final_version
if (yellowfin_debug > 4)
printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
__func__, pkt_len, data_size, boguscnt);
#endif
/* Check if the packet is long enough to just pass up the skbuff
without copying to a properly sized skbuff. */
if (pkt_len > rx_copybreak) {
skb_put(skb = rx_skb, pkt_len);
pci_unmap_single(yp->pci_dev,
le32_to_cpu(yp->rx_ring[entry].addr),
yp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
yp->rx_skbuff[entry] = NULL;
} else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(yp->pci_dev,
le32_to_cpu(desc->addr),
yp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
entry = (++yp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
entry = yp->dirty_rx % RX_RING_SIZE;
if (yp->rx_skbuff[entry] == NULL) {
struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
if (skb == NULL)
break; /* Better luck next round. */
yp->rx_skbuff[entry] = skb;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
if (entry != 0)
yp->rx_ring[entry - 1].dbdma_cmd =
cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
else
yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
| yp->rx_buf_sz);
}
return 0;
}
static void yellowfin_error(struct net_device *dev, int intr_status)
{
netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
/* Hmmmmm, it's not clear what to do here. */
if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
dev->stats.tx_errors++;
if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
dev->stats.rx_errors++;
}
static int yellowfin_close(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
int i;
netif_stop_queue (dev);
if (yellowfin_debug > 1) {
netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
ioread16(ioaddr + TxStatus),
ioread16(ioaddr + RxStatus),
ioread16(ioaddr + IntrStatus));
netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
yp->cur_tx, yp->dirty_tx,
yp->cur_rx, yp->dirty_rx);
}
/* Disable interrupts by clearing the interrupt mask. */
iowrite16(0x0000, ioaddr + IntrEnb);
/* Stop the chip's Tx and Rx processes. */
iowrite32(0x80000000, ioaddr + RxCtrl);
iowrite32(0x80000000, ioaddr + TxCtrl);
del_timer(&yp->timer);
#if defined(__i386__)
if (yellowfin_debug > 2) {
printk(KERN_DEBUG " Tx ring at %08llx:\n",
(unsigned long long)yp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE*2; i++)
printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
printk(KERN_DEBUG " Rx ring %08llx:\n",
(unsigned long long)yp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
yp->rx_ring[i].result_status);
if (yellowfin_debug > 6) {
if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
int j;
printk(KERN_DEBUG);
for (j = 0; j < 0x50; j++)
pr_cont(" %04x",
get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
pr_cont("\n");
}
}
}
}
#endif /* __i386__ debugging only */
free_irq(yp->pci_dev->irq, dev);
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (yp->rx_skbuff[i]) {
dev_kfree_skb(yp->rx_skbuff[i]);
}
yp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (yp->tx_skbuff[i])
dev_kfree_skb(yp->tx_skbuff[i]);
yp->tx_skbuff[i] = NULL;
}
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
if (yellowfin_debug > 0) {
netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
bogus_rx);
}
#endif
return 0;
}
/* Set or clear the multicast filter for this adaptor. */
static void set_rx_mode(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
u16 cfg_value = ioread16(ioaddr + Cnfg);
/* Stop the Rx process to change any value. */
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
} else if ((netdev_mc_count(dev) > 64) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct netdev_hw_addr *ha;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
slots must be set for each address. */
if (yp->drv_flags & HasMulticastBug) {
bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
hash_table[bit >> 4] |= (1 << bit);
}
/* Copy the hash table to the chip. */
for (i = 0; i < 4; i++)
iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
iowrite16(0x0003, ioaddr + AddrMode);
} else { /* Normal, unicast/broadcast-only mode. */
iowrite16(0x0001, ioaddr + AddrMode);
}
/* Restart the Rx process. */
iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
}
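/* Note: set_rx_mode() pauses the receiver (Cnfg bit 0x1000) while rewriting
 * the address filter. AddrMode 0x000F accepts everything (promiscuous),
 * 0x000B accepts all multicasts, 0x0003 enables the 64-entry multicast hash
 * spread across the four HashTbl registers, and 0x0001 is plain
 * unicast/broadcast reception.
 */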
static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct yellowfin_private *np = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = yellowfin_get_drvinfo
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct yellowfin_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
struct mii_ioctl_data *data = if_mii(rq);
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
data->phy_id = np->phys[0] & 0x1f;
/* Fall Through */
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
return 0;
case SIOCSMIIREG: /* Write MII PHY register. */
if (data->phy_id == np->phys[0]) {
u16 value = data->val_in;
switch (data->reg_num) {
case 0:
/* Check for autonegotiation on or reset. */
np->medialock = (value & 0x9000) ? 0 : 1;
if (np->medialock)
np->full_duplex = (value & 0x0100) ? 1 : 0;
break;
case 4: np->advertising = value; break;
}
/* Perhaps check_duplex(dev), depending on chip semantics. */
}
mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
return 0;
default:
return -EOPNOTSUPP;
}
}
static void yellowfin_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct yellowfin_private *np;
BUG_ON(!dev);
np = netdev_priv(dev);
pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
np->tx_status_dma);
pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
unregister_netdev (dev);
pci_iounmap(pdev, np->base);
pci_release_regions (pdev);
free_netdev (dev);
pci_set_drvdata(pdev, NULL);
}
static struct pci_driver yellowfin_driver = {
.name = DRV_NAME,
.id_table = yellowfin_pci_tbl,
.probe = yellowfin_init_one,
.remove = yellowfin_remove_one,
};
static int __init yellowfin_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
printk(version);
#endif
return pci_register_driver(&yellowfin_driver);
}
static void __exit yellowfin_cleanup (void)
{
pci_unregister_driver (&yellowfin_driver);
}
module_init(yellowfin_init);
module_exit(yellowfin_cleanup);
| gpl-2.0 |
Split-Screen/android_kernel_oneplus_msm8996 | sound/isa/wavefront/wavefront_fx.c | 3326 | 6309 | /*
* Copyright (c) 1998-2002 by Paul Davis <pbd@op.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/snd_wavefront.h>
#include <sound/initval.h>
/* Control bits for the Load Control Register
*/
#define FX_LSB_TRANSFER 0x01 /* transfer after DSP LSB byte written */
#define FX_MSB_TRANSFER 0x02 /* transfer after DSP MSB byte written */
#define FX_AUTO_INCR 0x04 /* auto-increment DSP address after transfer */
#define WAIT_IDLE 0xff
static int
wavefront_fx_idle (snd_wavefront_t *dev)
{
int i;
unsigned int x = 0x80;
for (i = 0; i < 1000; i++) {
x = inb (dev->fx_status);
if ((x & 0x80) == 0) {
break;
}
}
if (x & 0x80) {
snd_printk ("FX device never idle.\n");
return 0;
}
return (1);
}
static void
wavefront_fx_mute (snd_wavefront_t *dev, int onoff)
{
if (!wavefront_fx_idle(dev)) {
return;
}
outb (onoff ? 0x02 : 0x00, dev->fx_op);
}
static int
wavefront_fx_memset (snd_wavefront_t *dev,
int page,
int addr,
int cnt,
unsigned short *data)
{
if (page < 0 || page > 7) {
snd_printk ("FX memset: "
"page must be >= 0 and <= 7\n");
return -(EINVAL);
}
if (addr < 0 || addr > 0x7f) {
snd_printk ("FX memset: "
"addr must be >= 0 and <= 7f\n");
return -(EINVAL);
}
if (cnt == 1) {
outb (FX_LSB_TRANSFER, dev->fx_lcr);
outb (page, dev->fx_dsp_page);
outb (addr, dev->fx_dsp_addr);
outb ((data[0] >> 8), dev->fx_dsp_msb);
outb ((data[0] & 0xff), dev->fx_dsp_lsb);
snd_printk ("FX: addr %d:%x set to 0x%x\n",
page, addr, data[0]);
} else {
int i;
outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev->fx_lcr);
outb (page, dev->fx_dsp_page);
outb (addr, dev->fx_dsp_addr);
for (i = 0; i < cnt; i++) {
outb ((data[i] >> 8), dev->fx_dsp_msb);
outb ((data[i] & 0xff), dev->fx_dsp_lsb);
if (!wavefront_fx_idle (dev)) {
break;
}
}
if (i != cnt) {
snd_printk ("FX memset "
"(0x%x, 0x%x, 0x%lx, %d) incomplete\n",
page, addr, (unsigned long) data, cnt);
return -(EIO);
}
}
return 0;
}
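/* Typical use of the helper above (a sketch only; the page, address and
 * value are illustrative, not taken from hardware documentation):
 *
 * unsigned short v = 0x1234;
 * wavefront_fx_memset(dev, 0, 0x10, 1, &v); // one word to page 0, addr 0x10
 *
 * Multi-word writes pass a count > 1 and rely on the auto-increment
 * transfer mode programmed above.
 */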
int
snd_wavefront_fx_detect (snd_wavefront_t *dev)
{
/* This is a crude check, but its the best one I have for now.
Certainly on the Maui and the Tropez, wavefront_fx_idle() will
report "never idle", which suggests that this test should
work OK.
*/
if (inb (dev->fx_status) & 0x80) {
snd_printk ("Hmm, probably a Maui or Tropez.\n");
return -1;
}
return 0;
}
int
snd_wavefront_fx_open (struct snd_hwdep *hw, struct file *file)
{
if (!try_module_get(hw->card->module))
return -EFAULT;
file->private_data = hw;
return 0;
}
int
snd_wavefront_fx_release (struct snd_hwdep *hw, struct file *file)
{
module_put(hw->card->module);
return 0;
}
int
snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct snd_card *card;
snd_wavefront_card_t *acard;
snd_wavefront_t *dev;
wavefront_fx_info r;
unsigned short *page_data = NULL;
unsigned short *pd;
int err = 0;
card = sdev->card;
if (snd_BUG_ON(!card))
return -ENODEV;
if (snd_BUG_ON(!card->private_data))
return -ENODEV;
acard = card->private_data;
dev = &acard->wavefront;
if (copy_from_user (&r, (void __user *)arg, sizeof (wavefront_fx_info)))
return -EFAULT;
switch (r.request) {
case WFFX_MUTE:
wavefront_fx_mute (dev, r.data[0]);
return -EIO;
case WFFX_MEMSET:
if (r.data[2] <= 0) {
snd_printk ("cannot write "
"<= 0 bytes to FX\n");
return -EIO;
} else if (r.data[2] == 1) {
pd = (unsigned short *) &r.data[3];
} else {
if (r.data[2] > 256) {
snd_printk ("cannot write "
"> 512 bytes to FX\n");
return -EIO;
}
page_data = memdup_user((unsigned char __user *)
r.data[3],
r.data[2] * sizeof(short));
if (IS_ERR(page_data))
return PTR_ERR(page_data);
pd = page_data;
}
err = wavefront_fx_memset (dev,
r.data[0], /* page */
r.data[1], /* addr */
r.data[2], /* cnt */
pd);
kfree(page_data);
break;
default:
snd_printk ("FX: ioctl %d not yet supported\n",
r.request);
return -ENOTTY;
}
return err;
}
/* YSS225 initialization.
This code was developed using DOSEMU. The Turtle Beach SETUPSND
utility was run with I/O tracing in DOSEMU enabled, and a reconstruction
of the port I/O done, using the Yamaha faxback document as a guide
to add more logic to the code. Its really pretty weird.
This is the approach of just dumping the whole I/O
sequence as a series of port/value pairs and a simple loop
that outputs it.
*/
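/* As parsed by the loop in snd_wavefront_fx_start() below, the firmware blob
 * "yamaha/yss225_registers.bin" is a flat list of byte pairs: the first byte
 * is either a register offset in the 8..15 range (written to dev->base +
 * offset) or the WAIT_IDLE (0xff) marker, and the second byte is the value
 * to write (unused for WAIT_IDLE).
 */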
int
snd_wavefront_fx_start (snd_wavefront_t *dev)
{
unsigned int i;
int err;
const struct firmware *firmware = NULL;
if (dev->fx_initialized)
return 0;
err = request_firmware(&firmware, "yamaha/yss225_registers.bin",
dev->card->dev);
if (err < 0) {
err = -1;
goto out;
}
for (i = 0; i + 1 < firmware->size; i += 2) {
if (firmware->data[i] >= 8 && firmware->data[i] < 16) {
outb(firmware->data[i + 1],
dev->base + firmware->data[i]);
} else if (firmware->data[i] == WAIT_IDLE) {
if (!wavefront_fx_idle(dev)) {
err = -1;
goto out;
}
} else {
snd_printk(KERN_ERR "invalid address"
" in register data\n");
err = -1;
goto out;
}
}
dev->fx_initialized = 1;
err = 0;
out:
release_firmware(firmware);
return err;
}
MODULE_FIRMWARE("yamaha/yss225_registers.bin");
| gpl-2.0 |
Howpathetic/Revolution_kernel | arch/mips/kernel/signal.c | 4350 | 17911 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include <asm/dsp.h>
#include "signal-common.h"
static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext sf_sc;
sigset_t sf_mask;
};
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct siginfo rs_info;
struct ucontext rs_uc;
};
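/* Both frames are pushed onto the user stack by the setup routines below;
 * the handler is then entered with a0 = signal number, a1 = 0 (traditional
 * frames) or a pointer to the siginfo (rt frames), a2 = a pointer to the
 * sigcontext/ucontext, $29 pointing at the frame and $31 at the matching
 * VDSO signal trampoline.
 */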
/*
* Helper routines
*/
static int protected_save_fp_context(struct sigcontext __user *sc)
{
int err;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(1);
err = save_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
err = restore_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __put_user(regs->acx, &sc->sc_acx);
#endif
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context(sc);
}
return err;
}
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fpcsr);
enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
/*
* If the signal handler set some FPU exceptions, clear it and
* send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fpcsr);
sig = SIGFPE;
}
return err ?: sig;
}
static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= protected_restore_fp_context(sc);
return err ?: sig;
}
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned int used_math;
unsigned long treg;
int err = 0;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __get_user(regs->acx, &sc->sc_acx);
#endif
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
err = check_and_restore_fp_context(sc);
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
}
return err;
}
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
sp = regs->regs[29];
/*
* FPU emulator may have it's own trampoline active just
* above the user stack, 16-bytes before the next lowest
* 16 byte boundary. Try to avoid trashing it.
*/
sp -= 32;
/* This is the X/Open sanctioned signal stack switching. */
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *uset;
uset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, uset, sizeof(sigset_t)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
current->saved_sigmask = current->blocked;
set_current_blocked(&newset);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
#endif
asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *unewset;
size_t sigsetsize;
/* XXX Don't preclude handling different sized sigset_t's. */
sigsetsize = regs.regs[5];
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
unewset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
current->saved_sigmask = current->blocked;
set_current_blocked(&newset);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
#endif
asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
stack_t __user *uoss = (stack_t __user *) regs.regs[5];
unsigned long usp = regs.regs[29];
return do_sigaltstack(uss, uoss, usp);
}
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE);
set_current_blocked(&blocked);
sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
set_current_blocked(&set);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (&regs));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set)
{
struct sigframe __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
#endif
static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set,
siginfo_t *info)
{
struct rt_sigframe __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
err |= __put_user((void __user *)current->sas_ss_sp,
&frame->rs_uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[29]),
&frame->rs_uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size,
&frame->rs_uc.uc_stack.ss_size);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
.setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
};
static int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
int ret;
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
case ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
}
/* fallthrough */
case ERESTARTNOINTR:
regs->regs[7] = regs->regs[26];
regs->regs[2] = regs->regs[0];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
ka, regs, sig, oldset, info);
else
ret = abi->setup_frame(vdso + abi->signal_return_offset,
ka, regs, sig, oldset);
if (ret)
return ret;
block_sigmask(ka, sig);
return ret;
}
static void do_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
sigset_t *oldset;
siginfo_t info;
int signr;
/*
* We want the common case to go fast, which is why we may in certain
* cases get here from kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
return;
}
if (regs->regs[0]) {
if (regs->regs[2] == ERESTARTNOHAND ||
regs->regs[2] == ERESTARTSYS ||
regs->regs[2] == ERESTARTNOINTR) {
regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
/*
* notification of userspace execution resumption
* - triggered by the TIF_WORK_MASK flags
*/
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
local_irq_enable();
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _save_fp_context(sc)
: fpu_emulator_save_context(sc);
}
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _restore_fp_context(sc)
: fpu_emulator_restore_context(sc);
}
#endif
static int signal_setup(void)
{
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
save_fp_context = fpu_emulator_save_context;
restore_fp_context = fpu_emulator_restore_context;
}
#endif
return 0;
}
arch_initcall(signal_setup);
| gpl-2.0 |
virtualopensystems/linux-kvm-arm | drivers/media/pci/mantis/mantis_ca.c | 4606 | 5803 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_link.h"
#include "mantis_hif.h"
#include "mantis_reg.h"
#include "mantis_ca.h"
static int mantis_ca_read_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Read", slot);
if (slot != 0)
return -EINVAL;
return mantis_hif_read_mem(ca, addr);
}
static int mantis_ca_write_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Write", slot);
if (slot != 0)
return -EINVAL;
return mantis_hif_write_mem(ca, addr, data);
}
static int mantis_ca_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Read", slot);
if (slot != 0)
return -EINVAL;
return mantis_hif_read_iom(ca, addr);
}
static int mantis_ca_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Write", slot);
if (slot != 0)
return -EINVAL;
return mantis_hif_write_iom(ca, addr, data);
}
static int mantis_ca_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot RESET", slot);
udelay(500); /* Wait.. */
mmwrite(0xda, MANTIS_PCMCIA_RESET); /* Leading edge assert */
udelay(500);
mmwrite(0x00, MANTIS_PCMCIA_RESET); /* Trailing edge deassert */
msleep(1000);
dvb_ca_en50221_camready_irq(&ca->en50221, 0);
return 0;
}
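/* The sequence above pulses MANTIS_PCMCIA_RESET (assert, then deassert),
 * gives the module a moment to recover and then signals the EN50221 core
 * via dvb_ca_en50221_camready_irq() that slot 0 may be ready again.
 */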
static int mantis_ca_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot shutdown", slot);
return 0;
}
static int mantis_ts_control(struct dvb_ca_en50221 *en50221, int slot)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): TS control", slot);
/* mantis_set_direction(mantis, 1); */ /* Enable TS through CAM */
return 0;
}
static int mantis_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
{
struct mantis_ca *ca = en50221->data;
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Slot(%d): Poll Slot status", slot);
if (ca->slot_state == MODULE_INSERTED) {
dprintk(MANTIS_DEBUG, 1, "CA Module present and ready");
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
} else {
dprintk(MANTIS_DEBUG, 1, "CA Module not present or not ready");
}
return 0;
}
int mantis_ca_init(struct mantis_pci *mantis)
{
struct dvb_adapter *dvb_adapter = &mantis->dvb_adapter;
struct mantis_ca *ca;
int ca_flags = 0, result;
dprintk(MANTIS_DEBUG, 1, "Initializing Mantis CA");
ca = kzalloc(sizeof(struct mantis_ca), GFP_KERNEL);
if (!ca) {
dprintk(MANTIS_ERROR, 1, "Out of memory!, exiting ..");
result = -ENOMEM;
goto err;
}
ca->ca_priv = mantis;
mantis->mantis_ca = ca;
ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE;
/* register CA interface */
ca->en50221.owner = THIS_MODULE;
ca->en50221.read_attribute_mem = mantis_ca_read_attr_mem;
ca->en50221.write_attribute_mem = mantis_ca_write_attr_mem;
ca->en50221.read_cam_control = mantis_ca_read_cam_ctl;
ca->en50221.write_cam_control = mantis_ca_write_cam_ctl;
ca->en50221.slot_reset = mantis_ca_slot_reset;
ca->en50221.slot_shutdown = mantis_ca_slot_shutdown;
ca->en50221.slot_ts_enable = mantis_ts_control;
ca->en50221.poll_slot_status = mantis_slot_status;
ca->en50221.data = ca;
mutex_init(&ca->ca_lock);
init_waitqueue_head(&ca->hif_data_wq);
init_waitqueue_head(&ca->hif_opdone_wq);
init_waitqueue_head(&ca->hif_write_wq);
dprintk(MANTIS_ERROR, 1, "Registering EN50221 device");
result = dvb_ca_en50221_init(dvb_adapter, &ca->en50221, ca_flags, 1);
if (result != 0) {
dprintk(MANTIS_ERROR, 1, "EN50221: Initialization failed <%d>", result);
goto err;
}
dprintk(MANTIS_ERROR, 1, "Registered EN50221 device");
mantis_evmgr_init(ca);
return 0;
err:
kfree(ca);
return result;
}
EXPORT_SYMBOL_GPL(mantis_ca_init);
void mantis_ca_exit(struct mantis_pci *mantis)
{
struct mantis_ca *ca = mantis->mantis_ca;
dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
if (!ca)
return;
mantis_evmgr_exit(ca);
dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
dvb_ca_en50221_release(&ca->en50221);
kfree(ca);
}
EXPORT_SYMBOL_GPL(mantis_ca_exit);
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_SHW-M290S | arch/arm/mach-ks8695/devices.c | 4606 | 5114 | /*
* arch/arm/mach-ks8695/devices.c
*
* Copyright (C) 2006 Andrew Victor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/platform_device.h>
#include <mach/irqs.h>
#include <mach/regs-wan.h>
#include <mach/regs-lan.h>
#include <mach/regs-hpna.h>
#include <mach/regs-switch.h>
#include <mach/regs-misc.h>
/* --------------------------------------------------------------------
* Ethernet
* -------------------------------------------------------------------- */
static u64 eth_dmamask = 0xffffffffUL;
static struct resource ks8695_wan_resources[] = {
[0] = {
.start = KS8695_WAN_PA,
.end = KS8695_WAN_PA + 0x00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "WAN RX",
.start = KS8695_IRQ_WAN_RX_STATUS,
.end = KS8695_IRQ_WAN_RX_STATUS,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "WAN TX",
.start = KS8695_IRQ_WAN_TX_STATUS,
.end = KS8695_IRQ_WAN_TX_STATUS,
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = "WAN Link",
.start = KS8695_IRQ_WAN_LINK,
.end = KS8695_IRQ_WAN_LINK,
.flags = IORESOURCE_IRQ,
},
[4] = {
.name = "WAN PHY",
.start = KS8695_MISC_PA,
.end = KS8695_MISC_PA + 0x1f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device ks8695_wan_device = {
.name = "ks8695_ether",
.id = 0,
.dev = {
.dma_mask = &eth_dmamask,
.coherent_dma_mask = 0xffffffff,
},
.resource = ks8695_wan_resources,
.num_resources = ARRAY_SIZE(ks8695_wan_resources),
};
static struct resource ks8695_lan_resources[] = {
[0] = {
.start = KS8695_LAN_PA,
.end = KS8695_LAN_PA + 0x00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "LAN RX",
.start = KS8695_IRQ_LAN_RX_STATUS,
.end = KS8695_IRQ_LAN_RX_STATUS,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "LAN TX",
.start = KS8695_IRQ_LAN_TX_STATUS,
.end = KS8695_IRQ_LAN_TX_STATUS,
.flags = IORESOURCE_IRQ,
},
[3] = {
.name = "LAN SWITCH",
.start = KS8695_SWITCH_PA,
.end = KS8695_SWITCH_PA + 0x4f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device ks8695_lan_device = {
.name = "ks8695_ether",
.id = 1,
.dev = {
.dma_mask = &eth_dmamask,
.coherent_dma_mask = 0xffffffff,
},
.resource = ks8695_lan_resources,
.num_resources = ARRAY_SIZE(ks8695_lan_resources),
};
static struct resource ks8695_hpna_resources[] = {
[0] = {
.start = KS8695_HPNA_PA,
.end = KS8695_HPNA_PA + 0x00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "HPNA RX",
.start = KS8695_IRQ_HPNA_RX_STATUS,
.end = KS8695_IRQ_HPNA_RX_STATUS,
.flags = IORESOURCE_IRQ,
},
[2] = {
.name = "HPNA TX",
.start = KS8695_IRQ_HPNA_TX_STATUS,
.end = KS8695_IRQ_HPNA_TX_STATUS,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device ks8695_hpna_device = {
.name = "ks8695_ether",
.id = 2,
.dev = {
.dma_mask = &eth_dmamask,
.coherent_dma_mask = 0xffffffff,
},
.resource = ks8695_hpna_resources,
.num_resources = ARRAY_SIZE(ks8695_hpna_resources),
};
void __init ks8695_add_device_wan(void)
{
platform_device_register(&ks8695_wan_device);
}
void __init ks8695_add_device_lan(void)
{
platform_device_register(&ks8695_lan_device);
}
void __init ks8696_add_device_hpna(void)
{
platform_device_register(&ks8695_hpna_device);
}
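/*
 * The three helpers above are intended to be called from board-specific
 * setup code for whichever interfaces are actually wired up, e.g. (sketch):
 *
 * ks8695_add_device_wan();
 * ks8695_add_device_lan();
 */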
/* --------------------------------------------------------------------
* Watchdog
* -------------------------------------------------------------------- */
static struct platform_device ks8695_wdt_device = {
.name = "ks8695_wdt",
.id = -1,
.num_resources = 0,
};
static void __init ks8695_add_device_watchdog(void)
{
platform_device_register(&ks8695_wdt_device);
}
/* --------------------------------------------------------------------
* LEDs
* -------------------------------------------------------------------- */
#if defined(CONFIG_LEDS)
short ks8695_leds_cpu = -1;
short ks8695_leds_timer = -1;
void __init ks8695_init_leds(u8 cpu_led, u8 timer_led)
{
/* Enable GPIO to access the LEDs */
gpio_direction_output(cpu_led, 1);
gpio_direction_output(timer_led, 1);
ks8695_leds_cpu = cpu_led;
ks8695_leds_timer = timer_led;
}
#else
void __init ks8695_init_leds(u8 cpu_led, u8 timer_led) {}
#endif
/* -------------------------------------------------------------------- */
/*
* These devices are always present and don't need any board-specific
* setup.
*/
static int __init ks8695_add_standard_devices(void)
{
ks8695_add_device_watchdog();
return 0;
}
arch_initcall(ks8695_add_standard_devices);
| gpl-2.0 |
TeamOrion-Devices/kernel_sony_msm8x27 | drivers/video/i740fb.c | 4862 | 34356 | /*
* i740fb - framebuffer driver for Intel740
* Copyright (c) 2011 Ondrej Zary
*
* Based on old i740fb driver (c) 2001-2002 Andrey Ulanov <drey@rt.mipt.ru>
* which was partially based on:
* VGA 16-color framebuffer driver (c) 1999 Ben Pfaff <pfaffben@debian.org>
* and Petr Vandrovec <VANDROVE@vc.cvut.cz>
* i740 driver from XFree86 (c) 1998-1999 Precision Insight, Inc., Cedar Park,
* Texas.
* i740fb by Patrick LERDA, v0.9
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/console.h>
#include <video/vga.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "i740_reg.h"
static char *mode_option __devinitdata;
#ifdef CONFIG_MTRR
static int mtrr __devinitdata = 1;
#endif
struct i740fb_par {
unsigned char __iomem *regs;
bool has_sgram;
#ifdef CONFIG_MTRR
int mtrr_reg;
#endif
bool ddc_registered;
struct i2c_adapter ddc_adapter;
struct i2c_algo_bit_data ddc_algo;
u32 pseudo_palette[16];
struct mutex open_lock;
unsigned int ref_count;
u8 crtc[VGA_CRT_C];
u8 atc[VGA_ATT_C];
u8 gdc[VGA_GFX_C];
u8 seq[VGA_SEQ_C];
u8 misc;
u8 vss;
/* i740 specific registers */
u8 display_cntl;
u8 pixelpipe_cfg0;
u8 pixelpipe_cfg1;
u8 pixelpipe_cfg2;
u8 video_clk2_m;
u8 video_clk2_n;
u8 video_clk2_mn_msbs;
u8 video_clk2_div_sel;
u8 pll_cntl;
u8 address_mapping;
u8 io_cntl;
u8 bitblt_cntl;
u8 ext_vert_total;
u8 ext_vert_disp_end;
u8 ext_vert_sync_start;
u8 ext_vert_blank_start;
u8 ext_horiz_total;
u8 ext_horiz_blank;
u8 ext_offset;
u8 interlace_cntl;
u32 lmi_fifo_watermark;
u8 ext_start_addr;
u8 ext_start_addr_hi;
};
#define DACSPEED8 203
#define DACSPEED16 163
#define DACSPEED24_SG 136
#define DACSPEED24_SD 128
#define DACSPEED32 86
static struct fb_fix_screeninfo i740fb_fix __devinitdata = {
.id = "i740fb",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 8,
.ypanstep = 1,
.accel = FB_ACCEL_NONE,
};
static inline void i740outb(struct i740fb_par *par, u16 port, u8 val)
{
vga_mm_w(par->regs, port, val);
}
static inline u8 i740inb(struct i740fb_par *par, u16 port)
{
return vga_mm_r(par->regs, port);
}
static inline void i740outreg(struct i740fb_par *par, u16 port, u8 reg, u8 val)
{
vga_mm_w_fast(par->regs, port, reg, val);
}
static inline u8 i740inreg(struct i740fb_par *par, u16 port, u8 reg)
{
vga_mm_w(par->regs, port, reg);
return vga_mm_r(par->regs, port+1);
}
static inline void i740outreg_mask(struct i740fb_par *par, u16 port, u8 reg,
u8 val, u8 mask)
{
vga_mm_w_fast(par->regs, port, reg, (val & mask)
| (i740inreg(par, port, reg) & ~mask));
}
#define REG_DDC_DRIVE 0x62
#define REG_DDC_STATE 0x63
#define DDC_SCL (1 << 3)
#define DDC_SDA (1 << 2)
static void i740fb_ddc_setscl(void *data, int val)
{
struct i740fb_par *par = data;
i740outreg_mask(par, XRX, REG_DDC_DRIVE, DDC_SCL, DDC_SCL);
i740outreg_mask(par, XRX, REG_DDC_STATE, val ? DDC_SCL : 0, DDC_SCL);
}
static void i740fb_ddc_setsda(void *data, int val)
{
struct i740fb_par *par = data;
i740outreg_mask(par, XRX, REG_DDC_DRIVE, DDC_SDA, DDC_SDA);
i740outreg_mask(par, XRX, REG_DDC_STATE, val ? DDC_SDA : 0, DDC_SDA);
}
static int i740fb_ddc_getscl(void *data)
{
struct i740fb_par *par = data;
i740outreg_mask(par, XRX, REG_DDC_DRIVE, 0, DDC_SCL);
return !!(i740inreg(par, XRX, REG_DDC_STATE) & DDC_SCL);
}
static int i740fb_ddc_getsda(void *data)
{
struct i740fb_par *par = data;
i740outreg_mask(par, XRX, REG_DDC_DRIVE, 0, DDC_SDA);
return !!(i740inreg(par, XRX, REG_DDC_STATE) & DDC_SDA);
}
static int __devinit i740fb_setup_ddc_bus(struct fb_info *info)
{
struct i740fb_par *par = info->par;
strlcpy(par->ddc_adapter.name, info->fix.id,
sizeof(par->ddc_adapter.name));
par->ddc_adapter.owner = THIS_MODULE;
par->ddc_adapter.class = I2C_CLASS_DDC;
par->ddc_adapter.algo_data = &par->ddc_algo;
par->ddc_adapter.dev.parent = info->device;
par->ddc_algo.setsda = i740fb_ddc_setsda;
par->ddc_algo.setscl = i740fb_ddc_setscl;
par->ddc_algo.getsda = i740fb_ddc_getsda;
par->ddc_algo.getscl = i740fb_ddc_getscl;
par->ddc_algo.udelay = 10;
par->ddc_algo.timeout = 20;
par->ddc_algo.data = par;
i2c_set_adapdata(&par->ddc_adapter, par);
return i2c_bit_add_bus(&par->ddc_adapter);
}
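/* The adapter registered above only provides the GPIO bit-banging; actually
 * fetching the monitor's EDID over it (typically via fb_ddc_read() from the
 * probe path) is assumed to happen elsewhere in the driver.
 */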
static int i740fb_open(struct fb_info *info, int user)
{
struct i740fb_par *par = info->par;
mutex_lock(&(par->open_lock));
par->ref_count++;
mutex_unlock(&(par->open_lock));
return 0;
}
static int i740fb_release(struct fb_info *info, int user)
{
struct i740fb_par *par = info->par;
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
printk(KERN_ERR "fb%d: release called with zero refcount\n",
info->node);
mutex_unlock(&(par->open_lock));
return -EINVAL;
}
par->ref_count--;
mutex_unlock(&(par->open_lock));
return 0;
}
static u32 i740_calc_fifo(struct i740fb_par *par, u32 freq, int bpp)
{
/*
* Would like to calculate these values automatically, but a generic
* algorithm does not seem possible. Note: These FIFO water mark
* values were tested on several cards and seem to eliminate the
* all of the snow and vertical banding, but fine adjustments will
* probably be required for other cards.
*/
u32 wm;
switch (bpp) {
case 8:
if (freq > 200)
wm = 0x18120000;
else if (freq > 175)
wm = 0x16110000;
else if (freq > 135)
wm = 0x120E0000;
else
wm = 0x100D0000;
break;
case 15:
case 16:
if (par->has_sgram) {
if (freq > 140)
wm = 0x2C1D0000;
else if (freq > 120)
wm = 0x2C180000;
else if (freq > 100)
wm = 0x24160000;
else if (freq > 90)
wm = 0x18120000;
else if (freq > 50)
wm = 0x16110000;
else if (freq > 32)
wm = 0x13100000;
else
wm = 0x120E0000;
} else {
if (freq > 160)
wm = 0x28200000;
else if (freq > 140)
wm = 0x2A1E0000;
else if (freq > 130)
wm = 0x2B1A0000;
else if (freq > 120)
wm = 0x2C180000;
else if (freq > 100)
wm = 0x24180000;
else if (freq > 90)
wm = 0x18120000;
else if (freq > 50)
wm = 0x16110000;
else if (freq > 32)
wm = 0x13100000;
else
wm = 0x120E0000;
}
break;
case 24:
if (par->has_sgram) {
if (freq > 130)
wm = 0x31200000;
else if (freq > 120)
wm = 0x2E200000;
else if (freq > 100)
wm = 0x2C1D0000;
else if (freq > 80)
wm = 0x25180000;
else if (freq > 64)
wm = 0x24160000;
else if (freq > 49)
wm = 0x18120000;
else if (freq > 32)
wm = 0x16110000;
else
wm = 0x13100000;
} else {
if (freq > 120)
wm = 0x311F0000;
else if (freq > 100)
wm = 0x2C1D0000;
else if (freq > 80)
wm = 0x25180000;
else if (freq > 64)
wm = 0x24160000;
else if (freq > 49)
wm = 0x18120000;
else if (freq > 32)
wm = 0x16110000;
else
wm = 0x13100000;
}
break;
case 32:
if (par->has_sgram) {
if (freq > 80)
wm = 0x2A200000;
else if (freq > 60)
wm = 0x281A0000;
else if (freq > 49)
wm = 0x25180000;
else if (freq > 32)
wm = 0x18120000;
else
wm = 0x16110000;
} else {
if (freq > 80)
wm = 0x29200000;
else if (freq > 60)
wm = 0x281A0000;
else if (freq > 49)
wm = 0x25180000;
else if (freq > 32)
wm = 0x18120000;
else
wm = 0x16110000;
}
break;
}
return wm;
}
/* clock calculation from i740fb by Patrick LERDA */
#define I740_RFREQ 1000000
#define TARGET_MAX_N 30
#define I740_FFIX (1 << 8)
#define I740_RFREQ_FIX (I740_RFREQ / I740_FFIX)
#define I740_REF_FREQ (6667 * I740_FFIX / 100) /* 66.67 MHz */
#define I740_MAX_VCO_FREQ (450 * I740_FFIX) /* 450 MHz */
static void i740_calc_vclk(u32 freq, struct i740fb_par *par)
{
const u32 err_max = freq / (200 * I740_RFREQ / I740_FFIX);
const u32 err_target = freq / (1000 * I740_RFREQ / I740_FFIX);
u32 err_best = 512 * I740_FFIX;
u32 f_err, f_vco;
int m_best = 0, n_best = 0, p_best = 0, d_best = 0;
int m, n;
p_best = min(15, ilog2(I740_MAX_VCO_FREQ / (freq / I740_RFREQ_FIX)));
d_best = 0;
f_vco = (freq * (1 << p_best)) / I740_RFREQ_FIX;
freq = freq / I740_RFREQ_FIX;
n = 2;
do {
n++;
m = ((f_vco * n) / I740_REF_FREQ + 2) / 4;
if (m < 3)
m = 3;
{
u32 f_out = (((m * I740_REF_FREQ * (4 << 2 * d_best))
/ n) + ((1 << p_best) / 2)) / (1 << p_best);
f_err = (freq - f_out);
if (abs(f_err) < err_max) {
m_best = m;
n_best = n;
err_best = f_err;
}
}
} while ((abs(f_err) >= err_target) &&
((n <= TARGET_MAX_N) || (abs(err_best) > err_max)));
if (abs(f_err) < err_target) {
m_best = m;
n_best = n;
}
par->video_clk2_m = (m_best - 2) & 0xFF;
par->video_clk2_n = (n_best - 2) & 0xFF;
par->video_clk2_mn_msbs = ((((n_best - 2) >> 4) & VCO_N_MSBS)
| (((m_best - 2) >> 8) & VCO_M_MSBS));
par->video_clk2_div_sel =
((p_best << 4) | (d_best ? 4 : 0) | REF_DIV_1);
}
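/*
* Illustrative note (added): with d_best == 0 the loop above is effectively
* searching integers m and n such that
*
*	freq ~= (4 * m * I740_REF_FREQ) / (n * (1 << p_best))
*
* where p_best is chosen first so that the VCO frequency (freq << p_best)
* stays below I740_MAX_VCO_FREQ; the resulting (m - 2) and (n - 2) values
* are then packed into the VCLK2 registers.
*/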
static int i740fb_decode_var(const struct fb_var_screeninfo *var,
struct i740fb_par *par, struct fb_info *info)
{
/*
* Get the video params out of 'var'.
* If a value doesn't fit, round it up; if it's too big, return -EINVAL.
*/
u32 xres, right, hslen, left, xtotal;
u32 yres, lower, vslen, upper, ytotal;
u32 vxres, xoffset, vyres, yoffset;
u32 bpp, base, dacspeed24, mem;
u8 r7;
int i;
dev_dbg(info->device, "decode_var: xres: %i, yres: %i, xres_v: %i, xres_v: %i\n",
var->xres, var->yres, var->xres_virtual, var->xres_virtual);
dev_dbg(info->device, " xoff: %i, yoff: %i, bpp: %i, graysc: %i\n",
var->xoffset, var->yoffset, var->bits_per_pixel,
var->grayscale);
dev_dbg(info->device, " activate: %i, nonstd: %i, vmode: %i\n",
var->activate, var->nonstd, var->vmode);
dev_dbg(info->device, " pixclock: %i, hsynclen:%i, vsynclen:%i\n",
var->pixclock, var->hsync_len, var->vsync_len);
dev_dbg(info->device, " left: %i, right: %i, up:%i, lower:%i\n",
var->left_margin, var->right_margin, var->upper_margin,
var->lower_margin);
bpp = var->bits_per_pixel;
switch (bpp) {
case 1 ... 8:
bpp = 8;
if ((1000000 / var->pixclock) > DACSPEED8) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 8bpp)\n",
1000000 / var->pixclock, DACSPEED8);
return -EINVAL;
}
break;
case 9 ... 15:
bpp = 15;
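/* fall through */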
case 16:
if ((1000000 / var->pixclock) > DACSPEED16) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 15/16bpp)\n",
1000000 / var->pixclock, DACSPEED16);
return -EINVAL;
}
break;
case 17 ... 24:
bpp = 24;
dacspeed24 = par->has_sgram ? DACSPEED24_SG : DACSPEED24_SD;
if ((1000000 / var->pixclock) > dacspeed24) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 24bpp)\n",
1000000 / var->pixclock, dacspeed24);
return -EINVAL;
}
break;
case 25 ... 32:
bpp = 32;
if ((1000000 / var->pixclock) > DACSPEED32) {
dev_err(info->device, "requested pixclock %i MHz out of range (max. %i MHz at 32bpp)\n",
1000000 / var->pixclock, DACSPEED32);
return -EINVAL;
}
break;
default:
return -EINVAL;
}
xres = ALIGN(var->xres, 8);
vxres = ALIGN(var->xres_virtual, 16);
if (vxres < xres)
vxres = xres;
xoffset = ALIGN(var->xoffset, 8);
if (xres + xoffset > vxres)
xoffset = vxres - xres;
left = ALIGN(var->left_margin, 8);
right = ALIGN(var->right_margin, 8);
hslen = ALIGN(var->hsync_len, 8);
yres = var->yres;
vyres = var->yres_virtual;
if (yres > vyres)
vyres = yres;
yoffset = var->yoffset;
if (yres + yoffset > vyres)
yoffset = vyres - yres;
lower = var->lower_margin;
vslen = var->vsync_len;
upper = var->upper_margin;
mem = vxres * vyres * ((bpp + 1) / 8);
if (mem > info->screen_size) {
dev_err(info->device, "not enough video memory (%d KB requested, %ld KB avaliable)\n",
mem >> 10, info->screen_size >> 10);
return -ENOMEM;
}
if (yoffset + yres > vyres)
yoffset = vyres - yres;
xtotal = xres + right + hslen + left;
ytotal = yres + lower + vslen + upper;
par->crtc[VGA_CRTC_H_TOTAL] = (xtotal >> 3) - 5;
par->crtc[VGA_CRTC_H_DISP] = (xres >> 3) - 1;
par->crtc[VGA_CRTC_H_BLANK_START] = ((xres + right) >> 3) - 1;
par->crtc[VGA_CRTC_H_SYNC_START] = (xres + right) >> 3;
par->crtc[VGA_CRTC_H_SYNC_END] = (((xres + right + hslen) >> 3) & 0x1F)
| ((((xres + right + hslen) >> 3) & 0x20) << 2);
par->crtc[VGA_CRTC_H_BLANK_END] = ((xres + right + hslen) >> 3 & 0x1F)
| 0x80;
par->crtc[VGA_CRTC_V_TOTAL] = ytotal - 2;
r7 = 0x10; /* disable linecompare */
if (ytotal & 0x100)
r7 |= 0x01;
if (ytotal & 0x200)
r7 |= 0x20;
par->crtc[VGA_CRTC_PRESET_ROW] = 0;
par->crtc[VGA_CRTC_MAX_SCAN] = 0x40; /* 1 scanline, no linecmp */
if (var->vmode & FB_VMODE_DOUBLE)
par->crtc[VGA_CRTC_MAX_SCAN] |= 0x80;
par->crtc[VGA_CRTC_CURSOR_START] = 0x00;
par->crtc[VGA_CRTC_CURSOR_END] = 0x00;
par->crtc[VGA_CRTC_CURSOR_HI] = 0x00;
par->crtc[VGA_CRTC_CURSOR_LO] = 0x00;
par->crtc[VGA_CRTC_V_DISP_END] = yres-1;
if ((yres-1) & 0x100)
r7 |= 0x02;
if ((yres-1) & 0x200)
r7 |= 0x40;
par->crtc[VGA_CRTC_V_BLANK_START] = yres + lower - 1;
par->crtc[VGA_CRTC_V_SYNC_START] = yres + lower - 1;
if ((yres + lower - 1) & 0x100)
r7 |= 0x0C;
if ((yres + lower - 1) & 0x200) {
par->crtc[VGA_CRTC_MAX_SCAN] |= 0x20;
r7 |= 0x80;
}
/* disabled IRQ */
par->crtc[VGA_CRTC_V_SYNC_END] =
((yres + lower - 1 + vslen) & 0x0F) & ~0x10;
/* 0x7F for VGA, but some SVGA chips require all 8 bits to be set */
par->crtc[VGA_CRTC_V_BLANK_END] = (yres + lower - 1 + vslen) & 0xFF;
par->crtc[VGA_CRTC_UNDERLINE] = 0x00;
par->crtc[VGA_CRTC_MODE] = 0xC3;
par->crtc[VGA_CRTC_LINE_COMPARE] = 0xFF;
par->crtc[VGA_CRTC_OVERFLOW] = r7;
par->vss = 0x00; /* 3DA */
for (i = 0x00; i < 0x10; i++)
par->atc[i] = i;
par->atc[VGA_ATC_MODE] = 0x81;
par->atc[VGA_ATC_OVERSCAN] = 0x00; /* 0 for EGA, 0xFF for VGA */
par->atc[VGA_ATC_PLANE_ENABLE] = 0x0F;
par->atc[VGA_ATC_COLOR_PAGE] = 0x00;
par->misc = 0xC3;
if (var->sync & FB_SYNC_HOR_HIGH_ACT)
par->misc &= ~0x40;
if (var->sync & FB_SYNC_VERT_HIGH_ACT)
par->misc &= ~0x80;
par->seq[VGA_SEQ_CLOCK_MODE] = 0x01;
par->seq[VGA_SEQ_PLANE_WRITE] = 0x0F;
par->seq[VGA_SEQ_CHARACTER_MAP] = 0x00;
par->seq[VGA_SEQ_MEMORY_MODE] = 0x06;
par->gdc[VGA_GFX_SR_VALUE] = 0x00;
par->gdc[VGA_GFX_SR_ENABLE] = 0x00;
par->gdc[VGA_GFX_COMPARE_VALUE] = 0x00;
par->gdc[VGA_GFX_DATA_ROTATE] = 0x00;
par->gdc[VGA_GFX_PLANE_READ] = 0;
par->gdc[VGA_GFX_MODE] = 0x02;
par->gdc[VGA_GFX_MISC] = 0x05;
par->gdc[VGA_GFX_COMPARE_MASK] = 0x0F;
par->gdc[VGA_GFX_BIT_MASK] = 0xFF;
base = (yoffset * vxres + (xoffset & ~7)) >> 2;
switch (bpp) {
case 8:
par->crtc[VGA_CRTC_OFFSET] = vxres >> 3;
par->ext_offset = vxres >> 11;
par->pixelpipe_cfg1 = DISPLAY_8BPP_MODE;
par->bitblt_cntl = COLEXP_8BPP;
break;
case 15: /* 0rrrrrgg gggbbbbb */
case 16: /* rrrrrggg gggbbbbb */
par->pixelpipe_cfg1 = (var->green.length == 6) ?
DISPLAY_16BPP_MODE : DISPLAY_15BPP_MODE;
par->crtc[VGA_CRTC_OFFSET] = vxres >> 2;
par->ext_offset = vxres >> 10;
par->bitblt_cntl = COLEXP_16BPP;
base *= 2;
break;
case 24:
par->crtc[VGA_CRTC_OFFSET] = (vxres * 3) >> 3;
par->ext_offset = (vxres * 3) >> 11;
par->pixelpipe_cfg1 = DISPLAY_24BPP_MODE;
par->bitblt_cntl = COLEXP_24BPP;
base &= 0xFFFFFFFE; /* ...ignore the last bit. */
base *= 3;
break;
case 32:
par->crtc[VGA_CRTC_OFFSET] = vxres >> 1;
par->ext_offset = vxres >> 9;
par->pixelpipe_cfg1 = DISPLAY_32BPP_MODE;
par->bitblt_cntl = COLEXP_RESERVED; /* Unimplemented on i740 */
base *= 4;
break;
}
par->crtc[VGA_CRTC_START_LO] = base & 0x000000FF;
par->crtc[VGA_CRTC_START_HI] = (base & 0x0000FF00) >> 8;
par->ext_start_addr =
((base & 0x003F0000) >> 16) | EXT_START_ADDR_ENABLE;
par->ext_start_addr_hi = (base & 0x3FC00000) >> 22;
par->pixelpipe_cfg0 = DAC_8_BIT;
par->pixelpipe_cfg2 = DISPLAY_GAMMA_ENABLE | OVERLAY_GAMMA_ENABLE;
par->io_cntl = EXTENDED_CRTC_CNTL;
par->address_mapping = LINEAR_MODE_ENABLE | PAGE_MAPPING_ENABLE;
par->display_cntl = HIRES_MODE;
/* Set the MCLK freq */
par->pll_cntl = PLL_MEMCLK_100000KHZ; /* 100 MHz -- use as default */
/* Calculate the extended CRTC regs */
par->ext_vert_total = (ytotal - 2) >> 8;
par->ext_vert_disp_end = (yres - 1) >> 8;
par->ext_vert_sync_start = (yres + lower) >> 8;
par->ext_vert_blank_start = (yres + lower) >> 8;
par->ext_horiz_total = ((xtotal >> 3) - 5) >> 8;
par->ext_horiz_blank = (((xres + right) >> 3) & 0x40) >> 6;
par->interlace_cntl = INTERLACE_DISABLE;
/* Set the overscan color to 0. (NOTE: This only affects >8bpp mode) */
par->atc[VGA_ATC_OVERSCAN] = 0;
/* Calculate VCLK that most closely matches the requested dot clock */
i740_calc_vclk((((u32)1e9) / var->pixclock) * (u32)(1e3), par);
/* Since we program the clocks ourselves, always use VCLK2. */
par->misc |= 0x0C;
/* Calculate the FIFO Watermark and Burst Length. */
par->lmi_fifo_watermark =
i740_calc_fifo(par, 1000000 / var->pixclock, bpp);
return 0;
}
static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 8;
break;
case 16:
switch (var->green.length) {
default:
case 5:
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
break;
case 6:
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = var->blue.length = 5;
break;
}
break;
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 8;
break;
case 32:
var->transp.offset = 24;
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->transp.length = 8;
var->red.length = var->green.length = var->blue.length = 8;
break;
default:
return -EINVAL;
}
if (var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres > var->yres_virtual)
var->yres_virtual = var->yres;
if (info->monspecs.hfmax && info->monspecs.vfmax &&
info->monspecs.dclkmax && fb_validate_mode(var, info) < 0)
return -EINVAL;
return 0;
}
static void vga_protect(struct i740fb_par *par)
{
/* disable the display */
i740outreg_mask(par, VGA_SEQ_I, VGA_SEQ_CLOCK_MODE, 0x20, 0x20);
i740inb(par, 0x3DA);
i740outb(par, VGA_ATT_W, 0x00); /* enable palette access */
}
static void vga_unprotect(struct i740fb_par *par)
{
/* reenable display */
i740outreg_mask(par, VGA_SEQ_I, VGA_SEQ_CLOCK_MODE, 0, 0x20);
i740inb(par, 0x3DA);
i740outb(par, VGA_ATT_W, 0x20); /* disable palette access */
}
static int i740fb_set_par(struct fb_info *info)
{
struct i740fb_par *par = info->par;
u32 itemp;
int i;
i = i740fb_decode_var(&info->var, par, info);
if (i)
return i;
memset(info->screen_base, 0, info->screen_size);
vga_protect(par);
i740outreg(par, XRX, DRAM_EXT_CNTL, DRAM_REFRESH_DISABLE);
mdelay(1);
i740outreg(par, XRX, VCLK2_VCO_M, par->video_clk2_m);
i740outreg(par, XRX, VCLK2_VCO_N, par->video_clk2_n);
i740outreg(par, XRX, VCLK2_VCO_MN_MSBS, par->video_clk2_mn_msbs);
i740outreg(par, XRX, VCLK2_VCO_DIV_SEL, par->video_clk2_div_sel);
i740outreg_mask(par, XRX, PIXPIPE_CONFIG_0,
par->pixelpipe_cfg0 & DAC_8_BIT, 0x80);
i740inb(par, 0x3DA);
i740outb(par, 0x3C0, 0x00);
/* update misc output register */
i740outb(par, VGA_MIS_W, par->misc | 0x01);
/* synchronous reset on */
i740outreg(par, VGA_SEQ_I, VGA_SEQ_RESET, 0x01);
/* write sequencer registers */
i740outreg(par, VGA_SEQ_I, VGA_SEQ_CLOCK_MODE,
par->seq[VGA_SEQ_CLOCK_MODE] | 0x20);
for (i = 2; i < VGA_SEQ_C; i++)
i740outreg(par, VGA_SEQ_I, i, par->seq[i]);
/* synchronous reset off */
i740outreg(par, VGA_SEQ_I, VGA_SEQ_RESET, 0x03);
/* deprotect CRT registers 0-7 */
i740outreg(par, VGA_CRT_IC, VGA_CRTC_V_SYNC_END,
par->crtc[VGA_CRTC_V_SYNC_END]);
/* write CRT registers */
for (i = 0; i < VGA_CRT_C; i++)
i740outreg(par, VGA_CRT_IC, i, par->crtc[i]);
/* write graphics controller registers */
for (i = 0; i < VGA_GFX_C; i++)
i740outreg(par, VGA_GFX_I, i, par->gdc[i]);
/* write attribute controller registers */
for (i = 0; i < VGA_ATT_C; i++) {
i740inb(par, VGA_IS1_RC); /* reset flip-flop */
i740outb(par, VGA_ATT_IW, i);
i740outb(par, VGA_ATT_IW, par->atc[i]);
}
i740inb(par, VGA_IS1_RC);
i740outb(par, VGA_ATT_IW, 0x20);
i740outreg(par, VGA_CRT_IC, EXT_VERT_TOTAL, par->ext_vert_total);
i740outreg(par, VGA_CRT_IC, EXT_VERT_DISPLAY, par->ext_vert_disp_end);
i740outreg(par, VGA_CRT_IC, EXT_VERT_SYNC_START,
par->ext_vert_sync_start);
i740outreg(par, VGA_CRT_IC, EXT_VERT_BLANK_START,
par->ext_vert_blank_start);
i740outreg(par, VGA_CRT_IC, EXT_HORIZ_TOTAL, par->ext_horiz_total);
i740outreg(par, VGA_CRT_IC, EXT_HORIZ_BLANK, par->ext_horiz_blank);
i740outreg(par, VGA_CRT_IC, EXT_OFFSET, par->ext_offset);
i740outreg(par, VGA_CRT_IC, EXT_START_ADDR_HI, par->ext_start_addr_hi);
i740outreg(par, VGA_CRT_IC, EXT_START_ADDR, par->ext_start_addr);
i740outreg_mask(par, VGA_CRT_IC, INTERLACE_CNTL,
par->interlace_cntl, INTERLACE_ENABLE);
i740outreg_mask(par, XRX, ADDRESS_MAPPING, par->address_mapping, 0x1F);
i740outreg_mask(par, XRX, BITBLT_CNTL, par->bitblt_cntl, COLEXP_MODE);
i740outreg_mask(par, XRX, DISPLAY_CNTL,
par->display_cntl, VGA_WRAP_MODE | GUI_MODE);
i740outreg_mask(par, XRX, PIXPIPE_CONFIG_0, par->pixelpipe_cfg0, 0x9B);
i740outreg_mask(par, XRX, PIXPIPE_CONFIG_2, par->pixelpipe_cfg2, 0x0C);
i740outreg(par, XRX, PLL_CNTL, par->pll_cntl);
i740outreg_mask(par, XRX, PIXPIPE_CONFIG_1,
par->pixelpipe_cfg1, DISPLAY_COLOR_MODE);
itemp = readl(par->regs + FWATER_BLC);
itemp &= ~(LMI_BURST_LENGTH | LMI_FIFO_WATERMARK);
itemp |= par->lmi_fifo_watermark;
writel(itemp, par->regs + FWATER_BLC);
i740outreg(par, XRX, DRAM_EXT_CNTL, DRAM_REFRESH_60HZ);
i740outreg_mask(par, MRX, COL_KEY_CNTL_1, 0, BLANK_DISP_OVERLAY);
i740outreg_mask(par, XRX, IO_CTNL,
par->io_cntl, EXTENDED_ATTR_CNTL | EXTENDED_CRTC_CNTL);
if (par->pixelpipe_cfg1 != DISPLAY_8BPP_MODE) {
i740outb(par, VGA_PEL_MSK, 0xFF);
i740outb(par, VGA_PEL_IW, 0x00);
for (i = 0; i < 256; i++) {
itemp = (par->pixelpipe_cfg0 & DAC_8_BIT) ? i : i >> 2;
i740outb(par, VGA_PEL_D, itemp);
i740outb(par, VGA_PEL_D, itemp);
i740outb(par, VGA_PEL_D, itemp);
}
}
/* Wait for screen to stabilize. */
mdelay(50);
vga_unprotect(par);
info->fix.line_length =
info->var.xres_virtual * info->var.bits_per_pixel / 8;
if (info->var.bits_per_pixel == 8)
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
else
info->fix.visual = FB_VISUAL_TRUECOLOR;
return 0;
}
static int i740fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
u32 r, g, b;
dev_dbg(info->device, "setcolreg: regno: %i, red=%d, green=%d, blue=%d, transp=%d, bpp=%d\n",
regno, red, green, blue, transp, info->var.bits_per_pixel);
switch (info->fix.visual) {
case FB_VISUAL_PSEUDOCOLOR:
if (regno >= 256)
return -EINVAL;
i740outb(info->par, VGA_PEL_IW, regno);
i740outb(info->par, VGA_PEL_D, red >> 8);
i740outb(info->par, VGA_PEL_D, green >> 8);
i740outb(info->par, VGA_PEL_D, blue >> 8);
break;
case FB_VISUAL_TRUECOLOR:
if (regno >= 16)
return -EINVAL;
r = (red >> (16 - info->var.red.length))
<< info->var.red.offset;
b = (blue >> (16 - info->var.blue.length))
<< info->var.blue.offset;
g = (green >> (16 - info->var.green.length))
<< info->var.green.offset;
((u32 *) info->pseudo_palette)[regno] = r | g | b;
break;
default:
return -EINVAL;
}
return 0;
}
static int i740fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct i740fb_par *par = info->par;
u32 base = (var->yoffset * info->var.xres_virtual
+ (var->xoffset & ~7)) >> 2;
dev_dbg(info->device, "pan_display: xoffset: %i yoffset: %i base: %i\n",
var->xoffset, var->yoffset, base);
switch (info->var.bits_per_pixel) {
case 8:
break;
case 15:
case 16:
base *= 2;
break;
case 24:
/*
* The last bit does not seem to have any effect on the start
* address register in 24bpp mode, so...
*/
base &= 0xFFFFFFFE; /* ...ignore the last bit. */
base *= 3;
break;
case 32:
base *= 4;
break;
}
par->crtc[VGA_CRTC_START_LO] = base & 0x000000FF;
par->crtc[VGA_CRTC_START_HI] = (base & 0x0000FF00) >> 8;
par->ext_start_addr_hi = (base & 0x3FC00000) >> 22;
par->ext_start_addr =
((base & 0x003F0000) >> 16) | EXT_START_ADDR_ENABLE;
i740outreg(par, VGA_CRT_IC, VGA_CRTC_START_LO, base & 0x000000FF);
i740outreg(par, VGA_CRT_IC, VGA_CRTC_START_HI,
(base & 0x0000FF00) >> 8);
i740outreg(par, VGA_CRT_IC, EXT_START_ADDR_HI,
(base & 0x3FC00000) >> 22);
i740outreg(par, VGA_CRT_IC, EXT_START_ADDR,
((base & 0x003F0000) >> 16) | EXT_START_ADDR_ENABLE);
return 0;
}
static int i740fb_blank(int blank_mode, struct fb_info *info)
{
struct i740fb_par *par = info->par;
unsigned char SEQ01;
int DPMSSyncSelect;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
SEQ01 = 0x00;
DPMSSyncSelect = HSYNC_ON | VSYNC_ON;
break;
case FB_BLANK_VSYNC_SUSPEND:
SEQ01 = 0x20;
DPMSSyncSelect = HSYNC_ON | VSYNC_OFF;
break;
case FB_BLANK_HSYNC_SUSPEND:
SEQ01 = 0x20;
DPMSSyncSelect = HSYNC_OFF | VSYNC_ON;
break;
case FB_BLANK_POWERDOWN:
SEQ01 = 0x20;
DPMSSyncSelect = HSYNC_OFF | VSYNC_OFF;
break;
default:
return -EINVAL;
}
/* Turn the screen on/off */
i740outb(par, SRX, 0x01);
SEQ01 |= i740inb(par, SRX + 1) & ~0x20;
i740outb(par, SRX, 0x01);
i740outb(par, SRX + 1, SEQ01);
/* Set the DPMS mode */
i740outreg(par, XRX, DPMS_SYNC_SELECT, DPMSSyncSelect);
/* Let fbcon do a soft blank for us */
return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
}
static struct fb_ops i740fb_ops = {
.owner = THIS_MODULE,
.fb_open = i740fb_open,
.fb_release = i740fb_release,
.fb_check_var = i740fb_check_var,
.fb_set_par = i740fb_set_par,
.fb_setcolreg = i740fb_setcolreg,
.fb_blank = i740fb_blank,
.fb_pan_display = i740fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* ------------------------------------------------------------------------- */
static int __devinit i740fb_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fb_info *info;
struct i740fb_par *par;
int ret, tmp;
bool found = false;
u8 *edid;
info = framebuffer_alloc(sizeof(struct i740fb_par), &(dev->dev));
if (!info) {
dev_err(&(dev->dev), "cannot allocate framebuffer\n");
return -ENOMEM;
}
par = info->par;
mutex_init(&par->open_lock);
info->var.activate = FB_ACTIVATE_NOW;
info->var.bits_per_pixel = 8;
info->fbops = &i740fb_ops;
info->pseudo_palette = par->pseudo_palette;
ret = pci_enable_device(dev);
if (ret) {
dev_err(info->device, "cannot enable PCI device\n");
goto err_enable_device;
}
ret = pci_request_regions(dev, info->fix.id);
if (ret) {
dev_err(info->device, "error requesting regions\n");
goto err_request_regions;
}
info->screen_base = pci_ioremap_bar(dev, 0);
if (!info->screen_base) {
dev_err(info->device, "error remapping base\n");
ret = -ENOMEM;
goto err_ioremap_1;
}
par->regs = pci_ioremap_bar(dev, 1);
if (!par->regs) {
dev_err(info->device, "error remapping MMIO\n");
ret = -ENOMEM;
goto err_ioremap_2;
}
/* detect memory size */
if ((i740inreg(par, XRX, DRAM_ROW_TYPE) & DRAM_ROW_1)
== DRAM_ROW_1_SDRAM)
i740outb(par, XRX, DRAM_ROW_BNDRY_1);
else
i740outb(par, XRX, DRAM_ROW_BNDRY_0);
info->screen_size = i740inb(par, XRX + 1) * 1024 * 1024;
/* detect memory type */
tmp = i740inreg(par, XRX, DRAM_ROW_CNTL_LO);
par->has_sgram = !((tmp & DRAM_RAS_TIMING) ||
(tmp & DRAM_RAS_PRECHARGE));
printk(KERN_INFO "fb%d: Intel740 on %s, %ld KB %s\n", info->node,
pci_name(dev), info->screen_size >> 10,
par->has_sgram ? "SGRAM" : "SDRAM");
info->fix = i740fb_fix;
info->fix.mmio_start = pci_resource_start(dev, 1);
info->fix.mmio_len = pci_resource_len(dev, 1);
info->fix.smem_start = pci_resource_start(dev, 0);
info->fix.smem_len = info->screen_size;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
if (i740fb_setup_ddc_bus(info) == 0) {
par->ddc_registered = true;
edid = fb_ddc_read(&par->ddc_adapter);
if (edid) {
fb_edid_to_monspecs(edid, &info->monspecs);
kfree(edid);
if (!info->monspecs.modedb)
dev_err(info->device,
"error getting mode database\n");
else {
const struct fb_videomode *m;
fb_videomode_to_modelist(
info->monspecs.modedb,
info->monspecs.modedb_len,
&info->modelist);
m = fb_find_best_display(&info->monspecs,
&info->modelist);
if (m) {
fb_videomode_to_var(&info->var, m);
/* fill all other info->var's fields */
if (!i740fb_check_var(&info->var, info))
found = true;
}
}
}
}
if (!mode_option && !found)
mode_option = "640x480-8@60";
if (mode_option) {
ret = fb_find_mode(&info->var, info, mode_option,
info->monspecs.modedb,
info->monspecs.modedb_len,
NULL, info->var.bits_per_pixel);
if (!ret || ret == 4) {
dev_err(info->device, "mode %s not found\n",
mode_option);
ret = -EINVAL;
}
}
fb_destroy_modedb(info->monspecs.modedb);
info->monspecs.modedb = NULL;
/* maximize virtual vertical size for fast scrolling */
info->var.yres_virtual = info->fix.smem_len * 8 /
(info->var.bits_per_pixel * info->var.xres_virtual);
if (ret == -EINVAL)
goto err_find_mode;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
dev_err(info->device, "cannot allocate colormap\n");
goto err_alloc_cmap;
}
ret = register_framebuffer(info);
if (ret) {
dev_err(info->device, "error registering framebuffer\n");
goto err_reg_framebuffer;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
pci_set_drvdata(dev, info);
#ifdef CONFIG_MTRR
if (mtrr) {
par->mtrr_reg = -1;
par->mtrr_reg = mtrr_add(info->fix.smem_start,
info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
}
#endif
return 0;
err_reg_framebuffer:
fb_dealloc_cmap(&info->cmap);
err_alloc_cmap:
err_find_mode:
if (par->ddc_registered)
i2c_del_adapter(&par->ddc_adapter);
pci_iounmap(dev, par->regs);
err_ioremap_2:
pci_iounmap(dev, info->screen_base);
err_ioremap_1:
pci_release_regions(dev);
err_request_regions:
/* pci_disable_device(dev); */
err_enable_device:
framebuffer_release(info);
return ret;
}
static void __devexit i740fb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
if (info) {
struct i740fb_par *par = info->par;
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
#endif
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
if (par->ddc_registered)
i2c_del_adapter(&par->ddc_adapter);
pci_iounmap(dev, par->regs);
pci_iounmap(dev, info->screen_base);
pci_release_regions(dev);
/* pci_disable_device(dev); */
pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
#ifdef CONFIG_PM
static int i740fb_suspend(struct pci_dev *dev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(dev);
struct i740fb_par *par = info->par;
/* don't disable console during hibernation and wakeup from it */
if (state.event == PM_EVENT_FREEZE || state.event == PM_EVENT_PRETHAW)
return 0;
console_lock();
mutex_lock(&(par->open_lock));
/* do nothing if framebuffer is not active */
if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
fb_set_suspend(info, 1);
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
static int i740fb_resume(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct i740fb_par *par = info->par;
console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0)
goto fail;
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
if (pci_enable_device(dev))
goto fail;
i740fb_set_par(info);
fb_set_suspend(info, 0);
fail:
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
#else
#define i740fb_suspend NULL
#define i740fb_resume NULL
#endif /* CONFIG_PM */
#define I740_ID_PCI 0x00d1
#define I740_ID_AGP 0x7800
static DEFINE_PCI_DEVICE_TABLE(i740fb_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, I740_ID_PCI) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, I740_ID_AGP) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, i740fb_id_table);
static struct pci_driver i740fb_driver = {
.name = "i740fb",
.id_table = i740fb_id_table,
.probe = i740fb_probe,
.remove = __devexit_p(i740fb_remove),
.suspend = i740fb_suspend,
.resume = i740fb_resume,
};
#ifndef MODULE
static int __init i740fb_setup(char *options)
{
char *opt;
if (!options || !*options)
return 0;
while ((opt = strsep(&options, ",")) != NULL) {
if (!*opt)
continue;
#ifdef CONFIG_MTRR
else if (!strncmp(opt, "mtrr:", 5))
mtrr = simple_strtoul(opt + 5, NULL, 0);
#endif
else
mode_option = opt;
}
return 0;
}
#endif
int __init i740fb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("i740fb", &option))
return -ENODEV;
i740fb_setup(option);
#endif
return pci_register_driver(&i740fb_driver);
}
static void __exit i740fb_exit(void)
{
pci_unregister_driver(&i740fb_driver);
}
module_init(i740fb_init);
module_exit(i740fb_exit);
MODULE_AUTHOR("(c) 2011 Ondrej Zary <linux@rainbow-software.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for Intel740");
module_param(mode_option, charp, 0444);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
#endif
| gpl-2.0 |
purrify/android_kernel_lenovo_a6000 | arch/mips/kernel/irq_txx9.c | 4862 | 4861 | /*
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
* linux/arch/mips/tx4927/common/tx4927_irq.c,
* linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
* ahennessy@mvista.com
* source@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/txx9irq.h>
struct txx9_irc_reg {
u32 cer;
u32 cr[2];
u32 unused0;
u32 ilr[8];
u32 unused1[4];
u32 imr;
u32 unused2[7];
u32 scr;
u32 unused3[7];
u32 ssr;
u32 unused4[7];
u32 csr;
};
/* IRCER : Int. Control Enable */
#define TXx9_IRCER_ICE 0x00000001
/* IRCR : Int. Control */
#define TXx9_IRCR_LOW 0x00000000
#define TXx9_IRCR_HIGH 0x00000001
#define TXx9_IRCR_DOWN 0x00000002
#define TXx9_IRCR_UP 0x00000003
#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
/* IRSCR : Int. Status Control */
#define TXx9_IRSCR_EIClrE 0x00000100
#define TXx9_IRSCR_EIClr_MASK 0x0000000f
/* IRCSR : Int. Current Status */
#define TXx9_IRCSR_IF 0x00010000
#define TXx9_IRCSR_ILV_MASK 0x00000700
#define TXx9_IRCSR_IVL_MASK 0x0000001f
#define irc_dlevel 0
#define irc_elevel 1
static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
static struct {
unsigned char level;
unsigned char mode;
} txx9irq[TXx9_MAX_IR] __read_mostly;
static void txx9_irq_unmask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (txx9irq[irq_nr].level << ofs),
ilrp);
#ifdef CONFIG_CPU_TX39XX
/* update IRCSR */
__raw_writel(0, &txx9_ircptr->imr);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
#endif
}
static inline void txx9_irq_mask(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
__raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
| (irc_dlevel << ofs),
ilrp);
#ifdef CONFIG_CPU_TX39XX
/* update IRCSR */
__raw_writel(0, &txx9_ircptr->imr);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
/* flush write buffer */
__raw_readl(&txx9_ircptr->ssr);
#else
mmiowb();
#endif
}
static void txx9_irq_mask_ack(struct irq_data *d)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
txx9_irq_mask(d);
/* clear edge detection */
if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
}
static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 cr;
u32 __iomem *crp;
int ofs;
int mode;
if (flow_type & IRQF_TRIGGER_PROBE)
return 0;
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
}
crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
cr = __raw_readl(crp);
ofs = (irq_nr & (8 - 1)) * 2;
cr &= ~(0x3 << ofs);
cr |= (mode & 0x3) << ofs;
__raw_writel(cr, crp);
txx9irq[irq_nr].mode = mode;
return 0;
}
static struct irq_chip txx9_irq_chip = {
.name = "TXX9",
.irq_ack = txx9_irq_mask_ack,
.irq_mask = txx9_irq_mask,
.irq_mask_ack = txx9_irq_mask_ack,
.irq_unmask = txx9_irq_unmask,
.irq_set_type = txx9_irq_set_type,
};
void __init txx9_irq_init(unsigned long baseaddr)
{
int i;
txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
for (i = 0; i < TXx9_MAX_IR; i++) {
txx9irq[i].level = 4; /* middle level */
txx9irq[i].mode = TXx9_IRCR_LOW;
irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
handle_level_irq);
}
/* mask all IRC interrupts */
__raw_writel(0, &txx9_ircptr->imr);
for (i = 0; i < 8; i++)
__raw_writel(0, &txx9_ircptr->ilr[i]);
/* setup IRC interrupt mode (Low Active) */
for (i = 0; i < 2; i++)
__raw_writel(0, &txx9_ircptr->cr[i]);
/* enable interrupt control */
__raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
__raw_writel(irc_elevel, &txx9_ircptr->imr);
}
int __init txx9_irq_set_pri(int irc_irq, int new_pri)
{
int old_pri;
if ((unsigned int)irc_irq >= TXx9_MAX_IR)
return 0;
old_pri = txx9irq[irc_irq].level;
txx9irq[irc_irq].level = new_pri;
return old_pri;
}
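/*
* Illustrative usage sketch (added, not from this file): SoC setup code
* typically maps the controller and then bumps the priority of one line:
*
*	txx9_irq_init(TXX9_IRC_BASE);
*	txx9_irq_set_pri(TXX9_IR_TMR0, 6);
*
* TXX9_IRC_BASE and TXX9_IR_TMR0 are hypothetical names used only for the
* example; real boards pass their own controller base and IRQ numbers.
*/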
int txx9_irq(void)
{
u32 csr = __raw_readl(&txx9_ircptr->csr);
if (likely(!(csr & TXx9_IRCSR_IF)))
return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
return -1;
}
| gpl-2.0 |
ssvb/linux-sunxi | drivers/media/video/gspca/w996Xcf.c | 5118 | 18008 | /**
*
* GSPCA sub driver for W996[78]CF JPEG USB Dual Mode Camera Chip.
*
* Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 w9968cf driver:
*
* Copyright (C) 2002-2004 by Luca Risolia <luca.risolia@studio.unibo.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* Note this is not a stand alone driver, it gets included in ov519.c, this
is a bit of a hack, but it needs the driver code for a lot of different
ov sensors which is already present in ov519.c (the old v4l1 driver used
the ovchipcam framework). When we have the time we really should move
the sensor drivers to v4l2 sub drivers, and properly split of this
driver from ov519.c */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define W9968CF_I2C_BUS_DELAY 4 /* delay in us for I2C bit r/w operations */
#define Y_QUANTABLE (&sd->jpeg_hdr[JPEG_QT0_OFFSET])
#define UV_QUANTABLE (&sd->jpeg_hdr[JPEG_QT1_OFFSET])
static const struct v4l2_pix_format w9968cf_vga_mode[] = {
{160, 120, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE,
.bytesperline = 160 * 2,
.sizeimage = 160 * 120 * 2,
.colorspace = V4L2_COLORSPACE_JPEG},
{176, 144, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE,
.bytesperline = 176 * 2,
.sizeimage = 176 * 144 * 2,
.colorspace = V4L2_COLORSPACE_JPEG},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320 * 2,
.sizeimage = 320 * 240 * 2,
.colorspace = V4L2_COLORSPACE_JPEG},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352 * 2,
.sizeimage = 352 * 288 * 2,
.colorspace = V4L2_COLORSPACE_JPEG},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640 * 2,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG},
};
static void reg_w(struct sd *sd, u16 index, u16 value);
/*--------------------------------------------------------------------------
Write 64-bit data to the fast serial bus registers.
On error, gspca_dev.usb_err is set to a negative value.
--------------------------------------------------------------------------*/
static void w9968cf_write_fsb(struct sd *sd, u16* data)
{
struct usb_device *udev = sd->gspca_dev.dev;
u16 value;
int ret;
if (sd->gspca_dev.usb_err < 0)
return;
value = *data++;
memcpy(sd->gspca_dev.usb_buf, data, 6);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
if (ret < 0) {
pr_err("Write FSB registers failed (%d)\n", ret);
sd->gspca_dev.usb_err = ret;
}
}
/*--------------------------------------------------------------------------
Write data to the serial bus control register.
On error, gspca_dev.usb_err is set to a negative value.
--------------------------------------------------------------------------*/
static void w9968cf_write_sb(struct sd *sd, u16 value)
{
int ret;
if (sd->gspca_dev.usb_err < 0)
return;
/* We don't use reg_w here, as that would cause all writes when
bitbanging i2c to be logged, making the logs impossible to read */
ret = usb_control_msg(sd->gspca_dev.dev,
usb_sndctrlpipe(sd->gspca_dev.dev, 0),
0,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, 0x01, NULL, 0, 500);
udelay(W9968CF_I2C_BUS_DELAY);
if (ret < 0) {
pr_err("Write SB reg [01] %04x failed\n", value);
sd->gspca_dev.usb_err = ret;
}
}
/*--------------------------------------------------------------------------
Read data from the serial bus control register.
Return the register value on success, a negative number otherwise.
--------------------------------------------------------------------------*/
static int w9968cf_read_sb(struct sd *sd)
{
int ret;
if (sd->gspca_dev.usb_err < 0)
return -1;
/* We don't use reg_r here, as the w9968cf is special and has 16
bit registers instead of 8 bit */
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
1,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0x01, sd->gspca_dev.usb_buf, 2, 500);
if (ret >= 0) {
ret = sd->gspca_dev.usb_buf[0] |
(sd->gspca_dev.usb_buf[1] << 8);
} else {
pr_err("Read SB reg [01] failed\n");
sd->gspca_dev.usb_err = ret;
}
udelay(W9968CF_I2C_BUS_DELAY);
return ret;
}
/*--------------------------------------------------------------------------
Upload quantization tables for the JPEG compression.
This function is called by w9968cf_mode_init_regs().
On error, gspca_dev.usb_err is set to a negative value.
--------------------------------------------------------------------------*/
static void w9968cf_upload_quantizationtables(struct sd *sd)
{
u16 a, b;
int i, j;
reg_w(sd, 0x39, 0x0010); /* JPEG clock enable */
for (i = 0, j = 0; i < 32; i++, j += 2) {
a = Y_QUANTABLE[j] | ((unsigned)(Y_QUANTABLE[j + 1]) << 8);
b = UV_QUANTABLE[j] | ((unsigned)(UV_QUANTABLE[j + 1]) << 8);
reg_w(sd, 0x40 + i, a);
reg_w(sd, 0x60 + i, b);
}
reg_w(sd, 0x39, 0x0012); /* JPEG encoder enable */
}
/****************************************************************************
* Low-level I2C I/O functions. *
* The adapter supports the following I2C transfer functions: *
* i2c_adap_fastwrite_byte_data() (at 400 kHz bit frequency only) *
* i2c_adap_read_byte_data() *
* i2c_adap_read_byte() *
****************************************************************************/
static void w9968cf_smbus_start(struct sd *sd)
{
w9968cf_write_sb(sd, 0x0011); /* SDE=1, SDA=0, SCL=1 */
w9968cf_write_sb(sd, 0x0010); /* SDE=1, SDA=0, SCL=0 */
}
static void w9968cf_smbus_stop(struct sd *sd)
{
w9968cf_write_sb(sd, 0x0010); /* SDE=1, SDA=0, SCL=0 */
w9968cf_write_sb(sd, 0x0011); /* SDE=1, SDA=0, SCL=1 */
w9968cf_write_sb(sd, 0x0013); /* SDE=1, SDA=1, SCL=1 */
}
static void w9968cf_smbus_write_byte(struct sd *sd, u8 v)
{
u8 bit;
int sda;
for (bit = 0 ; bit < 8 ; bit++) {
sda = (v & 0x80) ? 2 : 0;
v <<= 1;
/* SDE=1, SDA=sda, SCL=0 */
w9968cf_write_sb(sd, 0x10 | sda);
/* SDE=1, SDA=sda, SCL=1 */
w9968cf_write_sb(sd, 0x11 | sda);
/* SDE=1, SDA=sda, SCL=0 */
w9968cf_write_sb(sd, 0x10 | sda);
}
}
static void w9968cf_smbus_read_byte(struct sd *sd, u8 *v)
{
u8 bit;
/* No need to ensure SDA is high as we are always called after
read_ack which ends with SDA high */
*v = 0;
for (bit = 0 ; bit < 8 ; bit++) {
*v <<= 1;
/* SDE=1, SDA=1, SCL=1 */
w9968cf_write_sb(sd, 0x0013);
*v |= (w9968cf_read_sb(sd) & 0x0008) ? 1 : 0;
/* SDE=1, SDA=1, SCL=0 */
w9968cf_write_sb(sd, 0x0012);
}
}
static void w9968cf_smbus_write_nack(struct sd *sd)
{
/* No need to ensure SDA is high as we are always called after
read_byte which ends with SDA high */
w9968cf_write_sb(sd, 0x0013); /* SDE=1, SDA=1, SCL=1 */
w9968cf_write_sb(sd, 0x0012); /* SDE=1, SDA=1, SCL=0 */
}
static void w9968cf_smbus_read_ack(struct sd *sd)
{
int sda;
/* Ensure SDA is high before raising clock to avoid a spurious stop */
w9968cf_write_sb(sd, 0x0012); /* SDE=1, SDA=1, SCL=0 */
w9968cf_write_sb(sd, 0x0013); /* SDE=1, SDA=1, SCL=1 */
sda = w9968cf_read_sb(sd);
w9968cf_write_sb(sd, 0x0012); /* SDE=1, SDA=1, SCL=0 */
if (sda >= 0 && (sda & 0x08)) {
PDEBUG(D_USBI, "Did not receive i2c ACK");
sd->gspca_dev.usb_err = -EIO;
}
}
/* SMBus protocol: S Addr Wr [A] Subaddr [A] Value [A] P */
static void w9968cf_i2c_w(struct sd *sd, u8 reg, u8 value)
{
u16* data = (u16 *)sd->gspca_dev.usb_buf;
data[0] = 0x082f | ((sd->sensor_addr & 0x80) ? 0x1500 : 0x0);
data[0] |= (sd->sensor_addr & 0x40) ? 0x4000 : 0x0;
data[1] = 0x2082 | ((sd->sensor_addr & 0x40) ? 0x0005 : 0x0);
data[1] |= (sd->sensor_addr & 0x20) ? 0x0150 : 0x0;
data[1] |= (sd->sensor_addr & 0x10) ? 0x5400 : 0x0;
data[2] = 0x8208 | ((sd->sensor_addr & 0x08) ? 0x0015 : 0x0);
data[2] |= (sd->sensor_addr & 0x04) ? 0x0540 : 0x0;
data[2] |= (sd->sensor_addr & 0x02) ? 0x5000 : 0x0;
data[3] = 0x1d20 | ((sd->sensor_addr & 0x02) ? 0x0001 : 0x0);
data[3] |= (sd->sensor_addr & 0x01) ? 0x0054 : 0x0;
w9968cf_write_fsb(sd, data);
data[0] = 0x8208 | ((reg & 0x80) ? 0x0015 : 0x0);
data[0] |= (reg & 0x40) ? 0x0540 : 0x0;
data[0] |= (reg & 0x20) ? 0x5000 : 0x0;
data[1] = 0x0820 | ((reg & 0x20) ? 0x0001 : 0x0);
data[1] |= (reg & 0x10) ? 0x0054 : 0x0;
data[1] |= (reg & 0x08) ? 0x1500 : 0x0;
data[1] |= (reg & 0x04) ? 0x4000 : 0x0;
data[2] = 0x2082 | ((reg & 0x04) ? 0x0005 : 0x0);
data[2] |= (reg & 0x02) ? 0x0150 : 0x0;
data[2] |= (reg & 0x01) ? 0x5400 : 0x0;
data[3] = 0x001d;
w9968cf_write_fsb(sd, data);
data[0] = 0x8208 | ((value & 0x80) ? 0x0015 : 0x0);
data[0] |= (value & 0x40) ? 0x0540 : 0x0;
data[0] |= (value & 0x20) ? 0x5000 : 0x0;
data[1] = 0x0820 | ((value & 0x20) ? 0x0001 : 0x0);
data[1] |= (value & 0x10) ? 0x0054 : 0x0;
data[1] |= (value & 0x08) ? 0x1500 : 0x0;
data[1] |= (value & 0x04) ? 0x4000 : 0x0;
data[2] = 0x2082 | ((value & 0x04) ? 0x0005 : 0x0);
data[2] |= (value & 0x02) ? 0x0150 : 0x0;
data[2] |= (value & 0x01) ? 0x5400 : 0x0;
data[3] = 0xfe1d;
w9968cf_write_fsb(sd, data);
PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg);
}
/* SMBus protocol: S Addr Wr [A] Subaddr [A] P S Addr+1 Rd [A] [Value] NA P */
static int w9968cf_i2c_r(struct sd *sd, u8 reg)
{
int ret = 0;
u8 value;
/* Fast serial bus data control disable */
w9968cf_write_sb(sd, 0x0013); /* don't change ! */
w9968cf_smbus_start(sd);
w9968cf_smbus_write_byte(sd, sd->sensor_addr);
w9968cf_smbus_read_ack(sd);
w9968cf_smbus_write_byte(sd, reg);
w9968cf_smbus_read_ack(sd);
w9968cf_smbus_stop(sd);
w9968cf_smbus_start(sd);
w9968cf_smbus_write_byte(sd, sd->sensor_addr + 1);
w9968cf_smbus_read_ack(sd);
w9968cf_smbus_read_byte(sd, &value);
/* signal we don't want to read anymore, the v4l1 driver used to
send an ack here which is very wrong! (and then fixed
the issues this gave by retrying reads) */
w9968cf_smbus_write_nack(sd);
w9968cf_smbus_stop(sd);
/* Fast serial bus data control re-enable */
w9968cf_write_sb(sd, 0x0030);
if (sd->gspca_dev.usb_err >= 0) {
ret = value;
PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value);
} else
PDEBUG(D_ERR, "i2c read [0x%02x] failed", reg);
return ret;
}
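/*
* Illustrative usage sketch (added, not part of the original driver): the
* sensor code in ov519.c pairs these helpers roughly like
*
*	w9968cf_i2c_w(sd, reg, val);
*	val = w9968cf_i2c_r(sd, reg);
*	if (sd->gspca_dev.usb_err < 0)
*		goto error;
*
* where reg and val stand for whatever sensor register is being accessed;
* errors are latched in gspca_dev.usb_err rather than returned directly.
*/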
/*--------------------------------------------------------------------------
Turn on the LED on some webcams. A beep should be heard too.
On error, gspca_dev.usb_err is set to a negative value.
--------------------------------------------------------------------------*/
static void w9968cf_configure(struct sd *sd)
{
reg_w(sd, 0x00, 0xff00); /* power-down */
reg_w(sd, 0x00, 0xbf17); /* reset everything */
reg_w(sd, 0x00, 0xbf10); /* normal operation */
reg_w(sd, 0x01, 0x0010); /* serial bus, SDS high */
reg_w(sd, 0x01, 0x0000); /* serial bus, SDS low */
reg_w(sd, 0x01, 0x0010); /* ..high 'beep-beep' */
reg_w(sd, 0x01, 0x0030); /* Set sda scl to FSB mode */
sd->stopped = 1;
}
static void w9968cf_init(struct sd *sd)
{
unsigned long hw_bufsize = sd->sif ? (352 * 288 * 2) : (640 * 480 * 2),
y0 = 0x0000,
u0 = y0 + hw_bufsize / 2,
v0 = u0 + hw_bufsize / 4,
y1 = v0 + hw_bufsize / 4,
u1 = y1 + hw_bufsize / 2,
v1 = u1 + hw_bufsize / 4;
reg_w(sd, 0x00, 0xff00); /* power off */
reg_w(sd, 0x00, 0xbf10); /* power on */
reg_w(sd, 0x03, 0x405d); /* DRAM timings */
reg_w(sd, 0x04, 0x0030); /* SDRAM timings */
reg_w(sd, 0x20, y0 & 0xffff); /* Y buf.0, low */
reg_w(sd, 0x21, y0 >> 16); /* Y buf.0, high */
reg_w(sd, 0x24, u0 & 0xffff); /* U buf.0, low */
reg_w(sd, 0x25, u0 >> 16); /* U buf.0, high */
reg_w(sd, 0x28, v0 & 0xffff); /* V buf.0, low */
reg_w(sd, 0x29, v0 >> 16); /* V buf.0, high */
reg_w(sd, 0x22, y1 & 0xffff); /* Y buf.1, low */
reg_w(sd, 0x23, y1 >> 16); /* Y buf.1, high */
reg_w(sd, 0x26, u1 & 0xffff); /* U buf.1, low */
reg_w(sd, 0x27, u1 >> 16); /* U buf.1, high */
reg_w(sd, 0x2a, v1 & 0xffff); /* V buf.1, low */
reg_w(sd, 0x2b, v1 >> 16); /* V buf.1, high */
reg_w(sd, 0x32, y1 & 0xffff); /* JPEG buf 0 low */
reg_w(sd, 0x33, y1 >> 16); /* JPEG buf 0 high */
reg_w(sd, 0x34, y1 & 0xffff); /* JPEG buf 1 low */
reg_w(sd, 0x35, y1 >> 16); /* JPEG buf 1 high */
reg_w(sd, 0x36, 0x0000);/* JPEG restart interval */
reg_w(sd, 0x37, 0x0804);/*JPEG VLE FIFO threshold*/
reg_w(sd, 0x38, 0x0000);/* disable hw up-scaling */
reg_w(sd, 0x3f, 0x0000); /* JPEG/MCTL test data */
}
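/*
* Illustrative layout (added): for a non-SIF (640x480) camera the buffer
* offsets computed above work out to
*
*	y0 = 0x000000, u0 = 0x04b000, v0 = 0x070800,
*	y1 = 0x096000, u1 = 0x0e1000, v1 = 0x106800
*
* i.e. two planar YUV frame buffers back to back, with the second one also
* reused as the JPEG stream buffer (registers 0x32-0x35 above).
*/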
static void w9968cf_set_crop_window(struct sd *sd)
{
int start_cropx, start_cropy, x, y, fw, fh, cw, ch,
max_width, max_height;
if (sd->sif) {
max_width = 352;
max_height = 288;
} else {
max_width = 640;
max_height = 480;
}
if (sd->sensor == SEN_OV7620) {
/* Sigh, this is dependent on the clock / framerate changes
made by the frequency control, sick. */
if (sd->ctrls[FREQ].val == 1) {
start_cropx = 277;
start_cropy = 37;
} else {
start_cropx = 105;
start_cropy = 37;
}
} else {
start_cropx = 320;
start_cropy = 35;
}
/* Workaround to avoid FP arithmetic */
#define SC(x) ((x) << 10)
/* Scaling factors */
fw = SC(sd->gspca_dev.width) / max_width;
fh = SC(sd->gspca_dev.height) / max_height;
cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh;
ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height;
sd->sensor_width = max_width;
sd->sensor_height = max_height;
x = (max_width - cw) / 2;
y = (max_height - ch) / 2;
reg_w(sd, 0x10, start_cropx + x);
reg_w(sd, 0x11, start_cropy + y);
reg_w(sd, 0x12, start_cropx + x + cw);
reg_w(sd, 0x13, start_cropy + y + ch);
}
static void w9968cf_mode_init_regs(struct sd *sd)
{
int val, vs_polarity, hs_polarity;
w9968cf_set_crop_window(sd);
reg_w(sd, 0x14, sd->gspca_dev.width);
reg_w(sd, 0x15, sd->gspca_dev.height);
/* JPEG width & height */
reg_w(sd, 0x30, sd->gspca_dev.width);
reg_w(sd, 0x31, sd->gspca_dev.height);
/* Y & UV frame buffer strides (in WORD) */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
reg_w(sd, 0x2c, sd->gspca_dev.width / 2);
reg_w(sd, 0x2d, sd->gspca_dev.width / 4);
} else
reg_w(sd, 0x2c, sd->gspca_dev.width);
reg_w(sd, 0x00, 0xbf17); /* reset everything */
reg_w(sd, 0x00, 0xbf10); /* normal operation */
/* Transfer size in WORDS (for UYVY format only) */
val = sd->gspca_dev.width * sd->gspca_dev.height;
reg_w(sd, 0x3d, val & 0xffff); /* low bits */
reg_w(sd, 0x3e, val >> 16); /* high bits */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
/* We may get called multiple times (usb isoc bw negotiat.) */
jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
sd->gspca_dev.width, 0x22); /* JPEG 420 */
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
w9968cf_upload_quantizationtables(sd);
}
/* Video Capture Control Register */
if (sd->sensor == SEN_OV7620) {
/* Seems to work around a bug in the image sensor */
vs_polarity = 1;
hs_polarity = 1;
} else {
vs_polarity = 1;
hs_polarity = 0;
}
val = (vs_polarity << 12) | (hs_polarity << 11);
/* NOTE: We may not have enough memory to do double buffering while
doing compression (amount of memory differs per model cam).
So we use the second image buffer also as jpeg stream buffer
(see w9968cf_init), and disable double buffering. */
if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
/* val |= 0x0002; YUV422P */
val |= 0x0003; /* YUV420P */
} else
val |= 0x0080; /* Enable HW double buffering */
/* val |= 0x0020; enable clamping */
/* val |= 0x0008; enable (1-2-1) filter */
/* val |= 0x000c; enable (2-3-6-3-2) filter */
val |= 0x8000; /* capt. enable */
reg_w(sd, 0x16, val);
sd->gspca_dev.empty_packet = 0;
}
static void w9968cf_stop0(struct sd *sd)
{
reg_w(sd, 0x39, 0x0000); /* disable JPEG encoder */
reg_w(sd, 0x16, 0x0000); /* stop video capture */
}
/* The w9968cf docs say that a 0 sized packet means EOF (and also SOF
for the next frame). This seems to simply not be true when operating
in JPEG mode, in this case there may be empty packets within the
frame. So in JPEG mode use the JPEG SOI marker to detect SOF.
Note to make things even more interesting the w9968cf sends *PLANAR* jpeg,
to be precise it sends: SOI, SOF, DRI, SOS, Y-data, SOS, U-data, SOS,
V-data, EOI. */
static void w9968cf_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
struct sd *sd = (struct sd *) gspca_dev;
if (w9968cf_vga_mode[gspca_dev->curr_mode].pixelformat ==
V4L2_PIX_FMT_JPEG) {
if (len >= 2 &&
data[0] == 0xff &&
data[1] == 0xd8) {
gspca_frame_add(gspca_dev, LAST_PACKET,
NULL, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET,
sd->jpeg_hdr, JPEG_HDR_SZ);
/* Strip the ff d8, our own header (which adds
huffman and quantization tables) already has this */
len -= 2;
data += 2;
}
} else {
/* In UYVY mode an empty packet signals EOF */
if (gspca_dev->empty_packet) {
gspca_frame_add(gspca_dev, LAST_PACKET,
NULL, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET,
NULL, 0);
gspca_dev->empty_packet = 0;
}
}
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
| gpl-2.0 |
mythos234/AndromedaCANCRO-KK | fs/btrfs/extent_map.c | 5630 | 8888 | #include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"
static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
extent_map_cache = kmem_cache_create("extent_map",
sizeof(struct extent_map), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_map_cache)
return -ENOMEM;
return 0;
}
void extent_map_exit(void)
{
if (extent_map_cache)
kmem_cache_destroy(extent_map_cache);
}
/**
* extent_map_tree_init - initialize extent map tree
* @tree: tree to initialize
*
* Initialize the extent tree @tree. Should be called for each new inode
* or other user of the extent_map interface.
*/
void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
rwlock_init(&tree->lock);
}
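/*
* Illustrative usage sketch (added, not from this file): an embedder keeps
* one tree per inode and initializes it when the in-memory inode is set up:
*
*	struct my_inode_info {
*		struct extent_map_tree extent_tree;
*	};
*
*	extent_map_tree_init(&info->extent_tree);
*
* struct my_inode_info is a hypothetical container used only for the example.
*/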
/**
* alloc_extent_map - allocate new extent map structure
*
* Allocate a new extent_map structure. The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map()
*/
struct extent_map *alloc_extent_map(void)
{
struct extent_map *em;
em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
em->in_tree = 0;
em->flags = 0;
em->compress_type = BTRFS_COMPRESS_NONE;
atomic_set(&em->refs, 1);
return em;
}
/**
* free_extent_map - drop reference count of an extent_map
* @em: extent map being released
*
* Drops the reference on @em by one and frees the structure
* if the reference count hits zero.
*/
void free_extent_map(struct extent_map *em)
{
if (!em)
return;
WARN_ON(atomic_read(&em->refs) == 0);
if (atomic_dec_and_test(&em->refs)) {
WARN_ON(em->in_tree);
kmem_cache_free(extent_map_cache, em);
}
}
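/*
* Illustrative sketch (added, not from this file): allocation and release
* pair around the reference count, e.g.
*
*	struct extent_map *em = alloc_extent_map();
*	if (!em)
*		return -ENOMEM;
*	em->start = start;
*	em->len = len;
*	...
*	free_extent_map(em);	(drops the last reference, freeing em)
*
* start and len here are hypothetical values supplied by the caller.
*/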
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
struct rb_node *node)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
WARN_ON(!entry->in_tree);
if (offset < entry->start)
p = &(*p)->rb_left;
else if (offset >= extent_map_end(entry))
p = &(*p)->rb_right;
else
return parent;
}
entry = rb_entry(node, struct extent_map, rb_node);
entry->in_tree = 1;
rb_link_node(node, parent, p);
rb_insert_color(node, root);
return NULL;
}
/*
* search through the tree for an extent_map with a given offset. If
* it can't be found, try to find some neighboring extents
*/
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
struct rb_node **prev_ret,
struct rb_node **next_ret)
{
struct rb_node *n = root->rb_node;
struct rb_node *prev = NULL;
struct rb_node *orig_prev = NULL;
struct extent_map *entry;
struct extent_map *prev_entry = NULL;
while (n) {
entry = rb_entry(n, struct extent_map, rb_node);
prev = n;
prev_entry = entry;
WARN_ON(!entry->in_tree);
if (offset < entry->start)
n = n->rb_left;
else if (offset >= extent_map_end(entry))
n = n->rb_right;
else
return n;
}
if (prev_ret) {
orig_prev = prev;
while (prev && offset >= extent_map_end(prev_entry)) {
prev = rb_next(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
*prev_ret = prev;
prev = orig_prev;
}
if (next_ret) {
prev_entry = rb_entry(prev, struct extent_map, rb_node);
while (prev && offset < prev_entry->start) {
prev = rb_prev(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
*next_ret = prev;
}
return NULL;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
return 0;
/*
* don't merge compressed extents, we need to know their
* actual size
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
return 0;
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
prev->bdev == next->bdev &&
((next->block_start == EXTENT_MAP_HOLE &&
prev->block_start == EXTENT_MAP_HOLE) ||
(next->block_start == EXTENT_MAP_INLINE &&
prev->block_start == EXTENT_MAP_INLINE) ||
(next->block_start == EXTENT_MAP_DELALLOC &&
prev->block_start == EXTENT_MAP_DELALLOC) ||
(next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
next->block_start == extent_map_block_end(prev)))) {
return 1;
}
return 0;
}
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
struct extent_map *merge = NULL;
struct rb_node *rb;
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(merge, em)) {
em->start = merge->start;
em->len += merge->len;
em->block_len += merge->block_len;
em->block_start = merge->block_start;
merge->in_tree = 0;
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
}
rb = rb_next(&em->rb_node);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(em, merge)) {
em->len += merge->len;
em->block_len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
free_extent_map(merge);
}
}
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
int ret = 0;
struct extent_map *em;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
WARN_ON(!em || em->start != start);
if (!em)
goto out;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
try_merge_map(tree, em);
free_extent_map(em);
out:
write_unlock(&tree->lock);
return ret;
}
/**
* add_extent_mapping - add new extent map to the extent tree
* @tree: tree to insert new map in
* @em: map to insert
*
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
* into the tree directly, with an additional reference taken, or a
* reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
{
int ret = 0;
struct rb_node *rb;
struct extent_map *exist;
exist = lookup_extent_mapping(tree, em->start, em->len);
if (exist) {
free_extent_map(exist);
ret = -EEXIST;
goto out;
}
rb = tree_insert(&tree->map, em->start, &em->rb_node);
if (rb) {
ret = -EEXIST;
goto out;
}
atomic_inc(&em->refs);
try_merge_map(tree, em);
out:
return ret;
}
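/*
* Illustrative usage sketch (added, not from this file): callers hold the
* tree lock across the insert and drop their own reference if the range is
* already mapped:
*
*	write_lock(&tree->lock);
*	ret = add_extent_mapping(tree, em);
*	write_unlock(&tree->lock);
*	if (ret == -EEXIST) {
*		free_extent_map(em);
*		em = NULL;
*	}
*/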
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
if (start + len < start)
return (u64)-1;
return start + len;
}
struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len, int strict)
{
struct extent_map *em;
struct rb_node *rb_node;
struct rb_node *prev = NULL;
struct rb_node *next = NULL;
u64 end = range_end(start, len);
rb_node = __tree_search(&tree->map, start, &prev, &next);
if (!rb_node) {
if (prev)
rb_node = prev;
else if (next)
rb_node = next;
else
return NULL;
}
em = rb_entry(rb_node, struct extent_map, rb_node);
if (strict && !(end > em->start && start < extent_map_end(em)))
return NULL;
atomic_inc(&em->refs);
return em;
}
/**
* lookup_extent_mapping - lookup extent_map
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
*
* Find and return the first extent_map struct in @tree that intersects the
* [start, len] range. There may be additional objects in the tree that
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
return __lookup_extent_mapping(tree, start, len, 1);
}
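/*
* Illustrative usage sketch (added, not from this file): a typical read
* path takes the tree lock, looks up the range and drops the reference the
* lookup took once it is done with the map:
*
*	read_lock(&tree->lock);
*	em = lookup_extent_mapping(tree, start, len);
*	read_unlock(&tree->lock);
*	if (em) {
*		... use em->block_start / extent_map_end(em) ...
*		free_extent_map(em);
*	}
*/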
/**
* search_extent_mapping - find a nearby extent map
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
*
* Find and return the first extent_map struct in @tree that intersects the
* [start, len] range.
*
* If one can't be found, any nearby extent may be returned
*/
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
return __lookup_extent_mapping(tree, start, len, 0);
}
/**
* remove_extent_mapping - removes an extent_map from the extent tree
* @tree: extent tree to remove from
* @em: extent map being removed
*
* Removes @em from @tree. No reference counts are dropped, and no checks
* are done to see if the range is in use
*/
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
int ret = 0;
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
rb_erase(&em->rb_node, &tree->map);
em->in_tree = 0;
return ret;
}
| gpl-2.0 |
Sparkey67/android_kernel_lge_g3-1 | fs/signalfd.c | 6142 | 8174 | /*
* fs/signalfd.c
*
* Copyright (C) 2003 Linus Torvalds
*
* Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
* Changed ->read() to return a siginfo structure instead of signal number.
* Fixed locking in ->poll().
* Added sighand-detach notification.
* Added fd re-use in sys_signalfd() syscall.
* Now using anonymous inode source.
* Thanks to Oleg Nesterov for useful code review and suggestions.
* More comments and suggestions from Arnd Bergmann.
* Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
* Retrieve multiple signals with one read() call
* Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
* Attach to the sighand only during read() and poll().
*/
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
void signalfd_cleanup(struct sighand_struct *sighand)
{
wait_queue_head_t *wqh = &sighand->signalfd_wqh;
/*
* The lockless check can race with remove_wait_queue() in progress,
* but in this case its caller should run under rcu_read_lock() and
* sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
*/
if (likely(!waitqueue_active(wqh)))
return;
/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
wake_up_poll(wqh, POLLHUP | POLLFREE);
}
struct signalfd_ctx {
sigset_t sigmask;
};
static int signalfd_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static unsigned int signalfd_poll(struct file *file, poll_table *wait)
{
struct signalfd_ctx *ctx = file->private_data;
unsigned int events = 0;
poll_wait(file, &current->sighand->signalfd_wqh, wait);
spin_lock_irq(&current->sighand->siglock);
if (next_signal(&current->pending, &ctx->sigmask) ||
next_signal(&current->signal->shared_pending,
&ctx->sigmask))
events |= POLLIN;
spin_unlock_irq(&current->sighand->siglock);
return events;
}
/*
* Copied from copy_siginfo_to_user() in kernel/signal.c
*/
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
siginfo_t const *kinfo)
{
long err;
BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);
/*
* Unused members should be zero ...
*/
err = __clear_user(uinfo, sizeof(*uinfo));
/*
* If you change siginfo_t structure, please be sure
* this code is fixed accordingly.
*/
err |= __put_user(kinfo->si_signo, &uinfo->ssi_signo);
err |= __put_user(kinfo->si_errno, &uinfo->ssi_errno);
err |= __put_user((short) kinfo->si_code, &uinfo->ssi_code);
switch (kinfo->si_code & __SI_MASK) {
case __SI_KILL:
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
break;
case __SI_TIMER:
err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
case __SI_POLL:
err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
err |= __put_user(kinfo->si_fd, &uinfo->ssi_fd);
break;
case __SI_FAULT:
err |= __put_user((long) kinfo->si_addr, &uinfo->ssi_addr);
#ifdef __ARCH_SI_TRAPNO
err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
#endif
#ifdef BUS_MCEERR_AO
/*
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
if (kinfo->si_code == BUS_MCEERR_AR ||
kinfo->si_code == BUS_MCEERR_AO)
err |= __put_user((short) kinfo->si_addr_lsb,
&uinfo->ssi_addr_lsb);
#endif
break;
case __SI_CHLD:
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
err |= __put_user(kinfo->si_status, &uinfo->ssi_status);
err |= __put_user(kinfo->si_utime, &uinfo->ssi_utime);
err |= __put_user(kinfo->si_stime, &uinfo->ssi_stime);
break;
case __SI_RT: /* This is not generated by the kernel as of now. */
case __SI_MESGQ: /* But this is */
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
default:
/*
* This case catches also the signals queued by sigqueue().
*/
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
}
return err ? -EFAULT: sizeof(*uinfo);
}
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
int nonblock)
{
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
spin_lock_irq(&current->sighand->siglock);
ret = dequeue_signal(current, &ctx->sigmask, info);
switch (ret) {
case 0:
if (!nonblock)
break;
ret = -EAGAIN;
default:
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
add_wait_queue(&current->sighand->signalfd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
ret = dequeue_signal(current, &ctx->sigmask, info);
if (ret != 0)
break;
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
spin_unlock_irq(&current->sighand->siglock);
schedule();
spin_lock_irq(&current->sighand->siglock);
}
spin_unlock_irq(&current->sighand->siglock);
remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
__set_current_state(TASK_RUNNING);
return ret;
}
/*
* Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
* error code. The "count" parameter must be at least the size of a
* "struct signalfd_siginfo".
*/
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct signalfd_ctx *ctx = file->private_data;
struct signalfd_siginfo __user *siginfo;
int nonblock = file->f_flags & O_NONBLOCK;
ssize_t ret, total = 0;
siginfo_t info;
count /= sizeof(struct signalfd_siginfo);
if (!count)
return -EINVAL;
siginfo = (struct signalfd_siginfo __user *) buf;
do {
ret = signalfd_dequeue(ctx, &info, nonblock);
if (unlikely(ret <= 0))
break;
ret = signalfd_copyinfo(siginfo, &info);
if (ret < 0)
break;
siginfo++;
total += ret;
nonblock = 1;
} while (--count);
return total ? total: ret;
}
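/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the read() contract documented above, seen from userspace.  Uses only the
 * standard signalfd(2)/glibc API; error handling is trimmed for brevity.
 */
#if 0	/* userspace example, not kernel code */
#include <sys/signalfd.h>
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	sigset_t mask;
	struct signalfd_siginfo info;
	int fd;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* signals must be blocked first */

	fd = signalfd(-1, &mask, SFD_CLOEXEC);
	if (fd < 0)
		return 1;

	/* Every successful read() returns a multiple of sizeof(info). */
	while (read(fd, &info, sizeof(info)) == sizeof(info))
		printf("got signal %u from pid %u\n", info.ssi_signo, info.ssi_pid);
	return 0;
}
#endif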
static const struct file_operations signalfd_fops = {
.release = signalfd_release,
.poll = signalfd_poll,
.read = signalfd_read,
.llseek = noop_llseek,
};
SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
size_t, sizemask, int, flags)
{
sigset_t sigmask;
struct signalfd_ctx *ctx;
/* Check the SFD_* constants for consistency. */
BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);
if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
return -EINVAL;
if (sizemask != sizeof(sigset_t) ||
copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
return -EINVAL;
sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
signotset(&sigmask);
if (ufd == -1) {
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->sigmask = sigmask;
/*
* When we call this, the initialization must be complete, since
* anon_inode_getfd() will install the fd.
*/
ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
if (ufd < 0)
kfree(ctx);
} else {
struct file *file = fget(ufd);
if (!file)
return -EBADF;
ctx = file->private_data;
if (file->f_op != &signalfd_fops) {
fput(file);
return -EINVAL;
}
spin_lock_irq(&current->sighand->siglock);
ctx->sigmask = sigmask;
spin_unlock_irq(&current->sighand->siglock);
wake_up(&current->sighand->signalfd_wqh);
fput(file);
}
return ufd;
}
SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
size_t, sizemask)
{
return sys_signalfd4(ufd, user_mask, sizemask, 0);
}
| gpl-2.0 |
jfdsmabalot/kernel_samsung_msm8974pro | drivers/hid/hid-monterey.c | 7422 | 2139 | /*
* HID driver for some monterey "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2007 Paul Walmsley
* Copyright (c) 2008 Jiri Slaby
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
return rdesc;
}
#define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
return 0;
switch (usage->hid & HID_USAGE) {
case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR); break;
case 0x157: mr_map_key_clear(KEY_SPREADSHEET); break;
case 0x158: mr_map_key_clear(KEY_PRESENTATION); break;
case 0x15c: mr_map_key_clear(KEY_STOP); break;
default:
return 0;
}
return 1;
}
static const struct hid_device_id mr_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ }
};
MODULE_DEVICE_TABLE(hid, mr_devices);
static struct hid_driver mr_driver = {
.name = "monterey",
.id_table = mr_devices,
.report_fixup = mr_report_fixup,
.input_mapping = mr_input_mapping,
};
static int __init mr_init(void)
{
return hid_register_driver(&mr_driver);
}
static void __exit mr_exit(void)
{
hid_unregister_driver(&mr_driver);
}
module_init(mr_init);
module_exit(mr_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
JustAkan/jolla-kernel_bullhead | arch/ia64/kernel/cyclone.c | 8958 | 3017 | #include <linux/module.h>
#include <linux/smp.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/io.h>
/* IBM Summit (EXA) Cyclone counter code*/
#define CYCLONE_CBAR_ADDR 0xFEB00CD0
#define CYCLONE_PMCC_OFFSET 0x51A0
#define CYCLONE_MPMC_OFFSET 0x51D0
#define CYCLONE_MPCS_OFFSET 0x51A8
#define CYCLONE_TIMER_FREQ 100000000
int use_cyclone;
void __init cyclone_setup(void)
{
use_cyclone = 1;
}
static void __iomem *cyclone_mc;
static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}
static struct clocksource clocksource_cyclone = {
.name = "cyclone",
.rating = 300,
.read = read_cyclone,
.mask = (1LL << 40) - 1,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int __init init_cyclone_clock(void)
{
u64 __iomem *reg;
u64 base; /* saved cyclone base address */
u64 offset; /* offset from pageaddr to cyclone_timer register */
int i;
u32 __iomem *cyclone_timer; /* Cyclone MPMC0 register */
if (!use_cyclone)
return 0;
printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
/* find base address */
offset = (CYCLONE_CBAR_ADDR);
reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
base = readq(reg);
iounmap(reg);
if(!base){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" value.\n");
use_cyclone = 0;
return -ENODEV;
}
/* setup PMCC */
offset = (base + CYCLONE_PMCC_OFFSET);
reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
writel(0x00000001,reg);
iounmap(reg);
/* setup MPCS */
offset = (base + CYCLONE_MPCS_OFFSET);
reg = ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
writel(0x00000001,reg);
iounmap(reg);
/* map in cyclone_timer */
offset = (base + CYCLONE_MPMC_OFFSET);
cyclone_timer = ioremap_nocache(offset, sizeof(u32));
if(!cyclone_timer){
printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
/* quick test to make sure it's ticking */
for(i=0; i<3; i++){
u32 old = readl(cyclone_timer);
int stall = 100;
while(stall--) barrier();
if(readl(cyclone_timer) == old){
printk(KERN_ERR "Summit chipset: Counter not counting!"
" DISABLED\n");
iounmap(cyclone_timer);
cyclone_timer = NULL;
use_cyclone = 0;
return -ENODEV;
}
}
/* initialize last tick */
cyclone_mc = cyclone_timer;
clocksource_cyclone.archdata.fsys_mmio = cyclone_timer;
clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ);
return 0;
}
__initcall(init_cyclone_clock);
| gpl-2.0 |
AmperificSuperKANG/lge_kernel_loki | drivers/net/wireless/b43/tables_phy_ht.c | 10238 | 32970 | /*
Broadcom B43 wireless driver
IEEE 802.11n HT-PHY data tables
Copyright (c) 2011 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "tables_phy_ht.h"
#include "phy_common.h"
#include "phy_ht.h"
static const u16 b43_httab_0x12[] = {
0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
0x0007, 0x0007,
};
static const u16 b43_httab_0x27[] = {
0x0009, 0x000e, 0x0011, 0x0014, 0x0017, 0x001a,
0x001d, 0x0020, 0x0009, 0x000e, 0x0011, 0x0014,
0x0017, 0x001a, 0x001d, 0x0020, 0x0009, 0x000e,
0x0011, 0x0014, 0x0017, 0x001a, 0x001d, 0x0020,
0x0009, 0x000e, 0x0011, 0x0014, 0x0017, 0x001a,
0x001d, 0x0020,
};
static const u16 b43_httab_0x26[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
};
static const u32 b43_httab_0x25[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static const u32 b43_httab_0x2f[] = {
0x00035700, 0x0002cc9a, 0x00026666, 0x0001581f,
0x0001581f, 0x0001581f, 0x0001581f, 0x0001581f,
0x0001581f, 0x0001581f, 0x0001581f, 0x00035700,
0x0002cc9a, 0x00026666, 0x0001581f, 0x0001581f,
0x0001581f, 0x0001581f, 0x0001581f, 0x0001581f,
0x0001581f, 0x0001581f,
};
static const u16 b43_httab_0x1a[] = {
0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
0x000b, 0x0007, 0x0002, 0x00fd,
};
static const u16 b43_httab_0x1b[] = {
0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
0x000b, 0x0007, 0x0002, 0x00fd,
};
static const u16 b43_httab_0x1c[] = {
0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
0x000b, 0x0007, 0x0002, 0x00fd,
};
static const u32 b43_httab_0x1a_0xc0[] = {
0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
};
static const u32 b43_httab_0x1a_0x140[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static const u32 b43_httab_0x1b_0x140[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static const u32 b43_httab_0x1c_0x140[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static const u16 b43_httab_0x1a_0x1c0[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
};
static const u16 b43_httab_0x1b_0x1c0[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
};
static const u16 b43_httab_0x1c_0x1c0[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000,
};
static const u16 b43_httab_0x1a_0x240[] = {
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6,
};
static const u16 b43_httab_0x1b_0x240[] = {
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6,
};
static const u16 b43_httab_0x1c_0x240[] = {
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
0x01d6, 0x01d6,
};
static const u32 b43_httab_0x1f[] = {
0x00000000, 0x00000000, 0x00016023, 0x00006028,
0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
};
static const u32 b43_httab_0x21[] = {
0x00000000, 0x00000000, 0x00016023, 0x00006028,
0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
};
static const u32 b43_httab_0x23[] = {
0x00000000, 0x00000000, 0x00016023, 0x00006028,
0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
};
static const u32 b43_httab_0x20[] = {
0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
};
static const u32 b43_httab_0x22[] = {
0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
};
static const u32 b43_httab_0x24[] = {
0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
};
/* Some late-init table */
const u32 b43_httab_0x1a_0xc0_late[] = {
0x10f90040, 0x10e10040, 0x10e1003c, 0x10c9003d,
0x10b9003c, 0x10a9003d, 0x10a1003c, 0x1099003b,
0x1091003b, 0x1089003a, 0x1081003a, 0x10790039,
0x10710039, 0x1069003a, 0x1061003b, 0x1059003d,
0x1051003f, 0x10490042, 0x1049003e, 0x1049003b,
0x1041003e, 0x1041003b, 0x1039003e, 0x1039003b,
0x10390038, 0x10390035, 0x1031003a, 0x10310036,
0x10310033, 0x1029003a, 0x10290037, 0x10290034,
0x10290031, 0x10210039, 0x10210036, 0x10210033,
0x10210030, 0x1019003c, 0x10190039, 0x10190036,
0x10190033, 0x10190030, 0x1019002d, 0x1019002b,
0x10190028, 0x1011003a, 0x10110036, 0x10110033,
0x10110030, 0x1011002e, 0x1011002b, 0x10110029,
0x10110027, 0x10110024, 0x10110022, 0x10110020,
0x1011001f, 0x1011001d, 0x1009003a, 0x10090037,
0x10090034, 0x10090031, 0x1009002e, 0x1009002c,
0x10090029, 0x10090027, 0x10090025, 0x10090023,
0x10090021, 0x1009001f, 0x1009001d, 0x1009001b,
0x1009001a, 0x10090018, 0x10090017, 0x10090016,
0x10090015, 0x10090013, 0x10090012, 0x10090011,
0x10090010, 0x1009000f, 0x1009000f, 0x1009000e,
0x1009000d, 0x1009000c, 0x1009000c, 0x1009000b,
0x1009000a, 0x1009000a, 0x10090009, 0x10090009,
0x10090008, 0x10090008, 0x10090007, 0x10090007,
0x10090007, 0x10090006, 0x10090006, 0x10090005,
0x10090005, 0x10090005, 0x10090005, 0x10090004,
0x10090004, 0x10090004, 0x10090004, 0x10090003,
0x10090003, 0x10090003, 0x10090003, 0x10090003,
0x10090003, 0x10090002, 0x10090002, 0x10090002,
0x10090002, 0x10090002, 0x10090002, 0x10090002,
0x10090002, 0x10090002, 0x10090001, 0x10090001,
0x10090001, 0x10090001, 0x10090001, 0x10090001,
};
/**************************************************
* R/W ops.
**************************************************/
u32 b43_httab_read(struct b43_wldev *dev, u32 offset)
{
u32 type, value;
type = offset & B43_HTTAB_TYPEMASK;
offset &= ~B43_HTTAB_TYPEMASK;
B43_WARN_ON(offset > 0xFFFF);
switch (type) {
case B43_HTTAB_8BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO) & 0xFF;
break;
case B43_HTTAB_16BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
break;
case B43_HTTAB_32BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATAHI);
value <<= 16;
value |= b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
break;
default:
B43_WARN_ON(1);
value = 0;
}
return value;
}
void b43_httab_read_bulk(struct b43_wldev *dev, u32 offset,
unsigned int nr_elements, void *_data)
{
u32 type;
u8 *data = _data;
unsigned int i;
type = offset & B43_HTTAB_TYPEMASK;
offset &= ~B43_HTTAB_TYPEMASK;
B43_WARN_ON(offset > 0xFFFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
switch (type) {
case B43_HTTAB_8BIT:
*data = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO) & 0xFF;
data++;
break;
case B43_HTTAB_16BIT:
*((u16 *)data) = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
data += 2;
break;
case B43_HTTAB_32BIT:
*((u32 *)data) = b43_phy_read(dev, B43_PHY_HT_TABLE_DATAHI);
*((u32 *)data) <<= 16;
*((u32 *)data) |= b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
data += 4;
break;
default:
B43_WARN_ON(1);
}
}
}
void b43_httab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
u32 type;
type = offset & B43_HTTAB_TYPEMASK;
offset &= 0xFFFF;
switch (type) {
case B43_HTTAB_8BIT:
B43_WARN_ON(value & ~0xFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
break;
case B43_HTTAB_16BIT:
B43_WARN_ON(value & ~0xFFFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
break;
case B43_HTTAB_32BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, value >> 16);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value & 0xFFFF);
break;
default:
B43_WARN_ON(1);
}
return;
}
void b43_httab_write_few(struct b43_wldev *dev, u32 offset, size_t num, ...)
{
va_list args;
u32 type, value;
unsigned int i;
type = offset & B43_HTTAB_TYPEMASK;
offset &= 0xFFFF;
va_start(args, num);
switch (type) {
case B43_HTTAB_8BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
for (i = 0; i < num; i++) {
value = va_arg(args, int);
B43_WARN_ON(value & ~0xFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
}
break;
case B43_HTTAB_16BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
for (i = 0; i < num; i++) {
value = va_arg(args, int);
B43_WARN_ON(value & ~0xFFFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
}
break;
case B43_HTTAB_32BIT:
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
for (i = 0; i < num; i++) {
value = va_arg(args, int);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI,
value >> 16);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO,
value & 0xFFFF);
}
break;
default:
B43_WARN_ON(1);
}
va_end(args);
return;
}
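/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the table accessors above are typically combined.  The table number
 * and offset below are borrowed from the init tables in this file purely as
 * an example; the values written are made up and not meant for real hardware.
 */
static void example_poke_httab(struct b43_wldev *dev)
{
	u32 old;

	old = b43_httab_read(dev, B43_HTTAB16(0x1a, 0x240));
	/* Write two consecutive 16-bit entries starting at that offset. */
	b43_httab_write_few(dev, B43_HTTAB16(0x1a, 0x240), 2, 0x0036, 0x002a);
	/* Restore the first entry afterwards. */
	b43_httab_write(dev, B43_HTTAB16(0x1a, 0x240), old);
}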
void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
unsigned int nr_elements, const void *_data)
{
u32 type, value;
const u8 *data = _data;
unsigned int i;
type = offset & B43_HTTAB_TYPEMASK;
offset &= ~B43_HTTAB_TYPEMASK;
B43_WARN_ON(offset > 0xFFFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
switch (type) {
case B43_HTTAB_8BIT:
value = *data;
data++;
B43_WARN_ON(value & ~0xFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
break;
case B43_HTTAB_16BIT:
value = *((u16 *)data);
data += 2;
B43_WARN_ON(value & ~0xFFFF);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
break;
case B43_HTTAB_32BIT:
value = *((u32 *)data);
data += 4;
b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, value >> 16);
b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO,
value & 0xFFFF);
break;
default:
B43_WARN_ON(1);
}
}
}
/**************************************************
* Tables ops.
**************************************************/
#define httab_upload(dev, offset, data) do { \
b43_httab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
} while (0)
void b43_phy_ht_tables_init(struct b43_wldev *dev)
{
BUILD_BUG_ON(ARRAY_SIZE(b43_httab_0x1a_0xc0_late) !=
B43_HTTAB_1A_C0_LATE_SIZE);
httab_upload(dev, B43_HTTAB16(0x12, 0), b43_httab_0x12);
httab_upload(dev, B43_HTTAB16(0x27, 0), b43_httab_0x27);
httab_upload(dev, B43_HTTAB16(0x26, 0), b43_httab_0x26);
httab_upload(dev, B43_HTTAB32(0x25, 0), b43_httab_0x25);
httab_upload(dev, B43_HTTAB32(0x2f, 0), b43_httab_0x2f);
httab_upload(dev, B43_HTTAB16(0x1a, 0), b43_httab_0x1a);
httab_upload(dev, B43_HTTAB16(0x1b, 0), b43_httab_0x1b);
httab_upload(dev, B43_HTTAB16(0x1c, 0), b43_httab_0x1c);
httab_upload(dev, B43_HTTAB32(0x1a, 0x0c0), b43_httab_0x1a_0xc0);
httab_upload(dev, B43_HTTAB32(0x1a, 0x140), b43_httab_0x1a_0x140);
httab_upload(dev, B43_HTTAB32(0x1b, 0x140), b43_httab_0x1b_0x140);
httab_upload(dev, B43_HTTAB32(0x1c, 0x140), b43_httab_0x1c_0x140);
httab_upload(dev, B43_HTTAB16(0x1a, 0x1c0), b43_httab_0x1a_0x1c0);
httab_upload(dev, B43_HTTAB16(0x1b, 0x1c0), b43_httab_0x1b_0x1c0);
httab_upload(dev, B43_HTTAB16(0x1c, 0x1c0), b43_httab_0x1c_0x1c0);
httab_upload(dev, B43_HTTAB16(0x1a, 0x240), b43_httab_0x1a_0x240);
httab_upload(dev, B43_HTTAB16(0x1b, 0x240), b43_httab_0x1b_0x240);
httab_upload(dev, B43_HTTAB16(0x1c, 0x240), b43_httab_0x1c_0x240);
httab_upload(dev, B43_HTTAB32(0x1f, 0), b43_httab_0x1f);
httab_upload(dev, B43_HTTAB32(0x21, 0), b43_httab_0x21);
httab_upload(dev, B43_HTTAB32(0x23, 0), b43_httab_0x23);
httab_upload(dev, B43_HTTAB32(0x20, 0), b43_httab_0x20);
httab_upload(dev, B43_HTTAB32(0x22, 0), b43_httab_0x22);
httab_upload(dev, B43_HTTAB32(0x24, 0), b43_httab_0x24);
}
| gpl-2.0 |
MassStash/htc_m8_kernel_GPE_5.0.1 | net/rmnet_data/rmnet_data_config.c | 255 | 18928 | /*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data configuration engine
*
*/
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/rmnet_data.h>
#include "rmnet_data_config.h"
#include "rmnet_data_handlers.h"
#include "rmnet_data_vnd.h"
#include "rmnet_data_private.h"
/* ***************** Local Definitions and Declarations ********************* */
static struct sock *nl_socket_handle;
#define RMNET_KERNEL_PRE_3_8
#ifndef RMNET_KERNEL_PRE_3_8
static struct netlink_kernel_cfg rmnet_netlink_cfg = {
.input = rmnet_config_netlink_msg_handler
};
#endif
#define RMNET_NL_MSG_SIZE(Y) (sizeof(((struct rmnet_nl_msg_s *)0)->Y))
/* ***************** Init and Cleanup *************************************** */
#ifdef RMNET_KERNEL_PRE_3_8
static struct sock *_rmnet_config_start_netlink(void)
{
return netlink_kernel_create(&init_net,
RMNET_NETLINK_PROTO,
0,
rmnet_config_netlink_msg_handler,
NULL,
THIS_MODULE);
}
#else
static struct sock *_rmnet_config_start_netlink(void)
{
return netlink_kernel_create(&init_net,
RMNET_NETLINK_PROTO,
&rmnet_netlink_cfg);
}
#endif /* RMNET_KERNEL_PRE_3_8 */
/**
* rmnet_config_init() - Startup init
*
* Registers the netlink protocol with the kernel and opens the socket. The
* netlink message handler is registered as part of socket creation.
*/
int rmnet_config_init(void)
{
nl_socket_handle = _rmnet_config_start_netlink();
if (!nl_socket_handle) {
LOGE("%s(): Failed to init netlink socket", __func__);
return RMNET_INIT_ERROR;
}
return 0;
}
/**
* rmnet_config_exit() - Cleans up all netlink related resources
*/
void rmnet_config_exit(void)
{
netlink_kernel_release(nl_socket_handle);
}
/* ***************** Helper Functions *************************************** */
/**
* _rmnet_is_physical_endpoint_associated() - Determines if device is associated
* @dev: Device to check
*
* Compares the device's rx_handler callback pointer against the known function
*
* Return:
* - 1 if associated
* - 0 if NOT associated
*/
static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev)
{
rx_handler_func_t *rx_handler;
rx_handler = rcu_dereference(dev->rx_handler);
if (rx_handler == rmnet_rx_handler)
return 1;
else
return 0;
}
/**
* _rmnet_get_phys_ep_config() - Get physical ep config for an associated device
* @dev: Device to get endpoint configuration from
*
* Return:
* - pointer to configuration if successful
* - 0 (null) if device is not associated
*/
static inline struct rmnet_phys_ep_conf_s *_rmnet_get_phys_ep_config
(struct net_device *dev)
{
if (_rmnet_is_physical_endpoint_associated(dev))
return (struct rmnet_phys_ep_conf_s *)
rcu_dereference(dev->rx_handler_data);
else
return 0;
}
/* ***************** Netlink Handler **************************************** */
#define _RMNET_NETLINK_NULL_CHECKS() do { if (!rmnet_header || !resp_rmnet) \
BUG(); \
} while (0)
static void _rmnet_netlink_set_link_egress_data_format
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
resp_rmnet->return_code =
rmnet_set_egress_data_format(dev,
rmnet_header->data_format.flags,
rmnet_header->data_format.agg_size,
rmnet_header->data_format.agg_count
);
}
static void _rmnet_netlink_set_link_ingress_data_format
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
resp_rmnet->return_code =
rmnet_set_ingress_data_format(dev,
rmnet_header->data_format.flags);
}
static void _rmnet_netlink_set_logical_ep_config
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev, *dev2;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
if (rmnet_header->local_ep_config.ep_id < -1
|| rmnet_header->local_ep_config.ep_id > 254) {
resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
return;
}
dev = dev_get_by_name(&init_net,
rmnet_header->local_ep_config.dev);
dev2 = dev_get_by_name(&init_net,
rmnet_header->local_ep_config.next_dev);
if (dev != 0 && dev2 != 0)
resp_rmnet->return_code =
rmnet_set_logical_endpoint_config(
dev,
rmnet_header->local_ep_config.ep_id,
rmnet_header->local_ep_config.operating_mode,
dev2);
else
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
}
static void _rmnet_netlink_associate_network_device
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
resp_rmnet->return_code = rmnet_associate_network_device(dev);
}
static void _rmnet_netlink_unassociate_network_device
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
resp_rmnet->return_code = rmnet_unassociate_network_device(dev);
}
static inline void _rmnet_netlink_get_link_egress_data_format
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
struct rmnet_phys_ep_conf_s *config;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
config = _rmnet_get_phys_ep_config(dev);
if (!config) {
resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
return;
}
/* Begin Data */
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format);
resp_rmnet->data_format.flags = config->egress_data_format;
resp_rmnet->data_format.agg_count = config->egress_agg_count;
resp_rmnet->data_format.agg_size = config->egress_agg_size;
}
static inline void _rmnet_netlink_get_link_ingress_data_format
(struct rmnet_nl_msg_s *rmnet_header,
struct rmnet_nl_msg_s *resp_rmnet)
{
struct net_device *dev;
struct rmnet_phys_ep_conf_s *config;
_RMNET_NETLINK_NULL_CHECKS();
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
if (!dev) {
resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
return;
}
config = _rmnet_get_phys_ep_config(dev);
if (!config) {
resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
return;
}
/* Begin Data */
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format);
resp_rmnet->data_format.flags = config->ingress_data_format;
}
/**
* rmnet_config_netlink_msg_handler() - Netlink message handler callback
* @skb: Packet containing netlink messages
*
* Standard kernel-expected format for a netlink message handler. Processes SKBs
* which contain RmNet data specific netlink messages.
*/
void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
{
struct nlmsghdr *nlmsg_header, *resp_nlmsg;
struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet;
int return_pid, response_data_length;
struct sk_buff *skb_response;
response_data_length = 0;
nlmsg_header = (struct nlmsghdr *) skb->data;
rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header);
LOGL("%s(): Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d\n",
__func__,
nlmsg_header->nlmsg_pid,
nlmsg_header->nlmsg_seq,
nlmsg_header->nlmsg_len,
rmnet_header->message_type);
return_pid = nlmsg_header->nlmsg_pid;
skb_response = nlmsg_new(sizeof(struct nlmsghdr)
+ sizeof(struct rmnet_nl_msg_s),
GFP_KERNEL);
if (!skb_response) {
LOGH("%s(): Failed to allocate response buffer\n", __func__);
return;
}
resp_nlmsg = nlmsg_put(skb_response,
0,
nlmsg_header->nlmsg_seq,
NLMSG_DONE,
sizeof(struct rmnet_nl_msg_s),
0);
resp_rmnet = nlmsg_data(resp_nlmsg);
if (!resp_rmnet)
BUG();
resp_rmnet->message_type = rmnet_header->message_type;
rtnl_lock();
switch (rmnet_header->message_type) {
case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE:
_rmnet_netlink_associate_network_device
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE:
_rmnet_netlink_unassociate_network_device
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT:
_rmnet_netlink_set_link_egress_data_format
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT:
_rmnet_netlink_get_link_egress_data_format
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT:
_rmnet_netlink_set_link_ingress_data_format
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT:
_rmnet_netlink_get_link_ingress_data_format
(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG:
_rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet);
break;
case RMNET_NETLINK_NEW_VND:
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
resp_rmnet->return_code =
rmnet_create_vnd(rmnet_header->vnd.id);
break;
default:
resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE;
break;
}
rtnl_unlock();
nlmsg_unicast(nl_socket_handle, skb_response, return_pid);
}
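/*
 * Illustrative sketch (editor's addition, not part of the original file): a
 * minimal userspace client for the handler above.  It assumes the uapi
 * header <linux/rmnet_data.h> exposes RMNET_NETLINK_PROTO, struct
 * rmnet_nl_msg_s and the message_type constants used in this file; the exact
 * struct layout is not spelled out here and error handling is trimmed.
 */
#if 0	/* userspace example, not kernel code */
#include <linux/netlink.h>
#include <linux/rmnet_data.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

static int rmnet_associate(const char *ifname)
{
	struct {
		struct nlmsghdr nlh;
		struct rmnet_nl_msg_s msg;
	} req;
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK, .nl_pid = getpid() };
	int fd = socket(AF_NETLINK, SOCK_RAW, RMNET_NETLINK_PROTO);

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = NLMSG_DONE;
	req.nlh.nlmsg_pid = getpid();
	req.msg.message_type = RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE;
	strncpy((char *)req.msg.data, ifname, sizeof(req.msg.data) - 1);

	send(fd, &req, sizeof(req), 0);
	recv(fd, &req, sizeof(req), 0);		/* reply reuses the same layout */
	close(fd);
	return req.msg.return_code;		/* RMNET_CONFIG_OK on success */
}
#endif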
/* ***************** Configuration API ************************************** */
/**
* rmnet_unassociate_network_device() - Unassociate network device
* @dev: Device to unassociate
*
* Frees all structures generated for the device. Unregisters the rx_handler
* todo: needs to do some sanity verification first (is device in use, etc...)
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
* - RMNET_CONFIG_INVALID_REQUEST if device is not already associated
* - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
*/
int rmnet_unassociate_network_device(struct net_device *dev)
{
struct rmnet_phys_ep_conf_s *config;
ASSERT_RTNL();
if (!dev)
return RMNET_CONFIG_NO_SUCH_DEVICE;
LOGL("%s(%s);", __func__, dev->name);
if (!_rmnet_is_physical_endpoint_associated(dev))
return RMNET_CONFIG_INVALID_REQUEST;
config = (struct rmnet_phys_ep_conf_s *)
rcu_dereference(dev->rx_handler_data);
if (!config)
return RMNET_CONFIG_UNKNOWN_ERROR;
kfree(config);
netdev_rx_handler_unregister(dev);
return RMNET_CONFIG_OK;
}
/**
* rmnet_set_ingress_data_format() - Set ingress data format on network device
* @dev: Device to set ingress data format on
* @ingress_data_format: 32-bit unsigned bitmask of ingress format
*
* Network device must already have association with RmNet Data driver
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
* - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
*/
int rmnet_set_ingress_data_format(struct net_device *dev,
uint32_t ingress_data_format)
{
struct rmnet_phys_ep_conf_s *config;
ASSERT_RTNL();
if (!dev)
return RMNET_CONFIG_NO_SUCH_DEVICE;
LOGL("%s(%s,0x%08X);", __func__, dev->name, ingress_data_format);
config = _rmnet_get_phys_ep_config(dev);
if (!config)
return RMNET_CONFIG_INVALID_REQUEST;
config->ingress_data_format = ingress_data_format;
return RMNET_CONFIG_OK;
}
/**
* rmnet_set_egress_data_format() - Set egress data format on network device
* @dev: Device to set egress data format on
* @egress_data_format: 32-bit unsigned bitmask of egress format
*
* Network device must already have association with RmNet Data driver
* todo: Bounds check on agg_*
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
* - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
*/
int rmnet_set_egress_data_format(struct net_device *dev,
uint32_t egress_data_format,
uint16_t agg_size,
uint16_t agg_count)
{
struct rmnet_phys_ep_conf_s *config;
ASSERT_RTNL();
if (!dev)
return RMNET_CONFIG_NO_SUCH_DEVICE;
LOGL("%s(%s,0x%08X, %d, %d);",
__func__, dev->name, egress_data_format, agg_size, agg_count);
config = _rmnet_get_phys_ep_config(dev);
if (!config)
return RMNET_CONFIG_UNKNOWN_ERROR;
config->egress_data_format = egress_data_format;
config->egress_agg_size = agg_size;
config->egress_agg_count = agg_count;
return RMNET_CONFIG_OK;
}
/**
* rmnet_associate_network_device() - Associate network device
* @dev: Device to register with RmNet data
*
* Typically used on physical network devices. Registers RX handler and private
* metadata structures.
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
* - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled
* - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails
*/
int rmnet_associate_network_device(struct net_device *dev)
{
struct rmnet_phys_ep_conf_s *config;
int rc;
ASSERT_RTNL();
if (!dev)
return RMNET_CONFIG_NO_SUCH_DEVICE;
LOGL("%s(%s);", __func__, dev->name);
if (_rmnet_is_physical_endpoint_associated(dev)) {
LOGM("%s(): %s is already regestered\n", __func__, dev->name);
return RMNET_CONFIG_DEVICE_IN_USE;
}
config = kzalloc(sizeof(struct rmnet_phys_ep_conf_s), GFP_ATOMIC);
if (!config)
return RMNET_CONFIG_NOMEM;
config->dev = dev;
spin_lock_init(&config->agg_lock);
rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
if (rc) {
LOGM("%s(): netdev_rx_handler_register returns %d\n",
__func__, rc);
kfree(config);
return RMNET_CONFIG_DEVICE_IN_USE;
}
return RMNET_CONFIG_OK;
}
/**
* _rmnet_set_logical_endpoint_config() - Set logical endpoint config on device
* @dev: Device to set endpoint configuration on
* @config_id: logical endpoint id on device
* @epconfig: endpoint configuration structure to set
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
* - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
* - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
*/
int _rmnet_set_logical_endpoint_config(struct net_device *dev,
int config_id,
struct rmnet_logical_ep_conf_s *epconfig)
{
struct rmnet_phys_ep_conf_s *config;
struct rmnet_logical_ep_conf_s *epconfig_l;
ASSERT_RTNL();
if (!dev)
return RMNET_CONFIG_NO_SUCH_DEVICE;
if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT
|| config_id >= RMNET_DATA_MAX_LOGICAL_EP)
return RMNET_CONFIG_BAD_ARGUMENTS;
if (rmnet_vnd_is_vnd(dev))
epconfig_l = rmnet_vnd_get_le_config(dev);
else {
config = _rmnet_get_phys_ep_config(dev);
if (!config)
return RMNET_CONFIG_UNKNOWN_ERROR;
if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
epconfig_l = &config->local_ep;
else
epconfig_l = &config->muxed_ep[config_id];
}
memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s));
if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
epconfig_l->mux_id = 0;
else
epconfig_l->mux_id = config_id;
return RMNET_CONFIG_OK;
}
/**
* rmnet_set_logical_endpoint_config() - Set logical endpoint configuration on a device
* @dev: Device to set endpoint configuration on
* @config_id: logical endpoint id on device
* @rmnet_mode: endpoint mode. Values from: rmnet_config_endpoint_modes_e
* @egress_dev: device node to forward the packet to once done processing in
* ingress/egress handlers
*
* Creates a logical_endpoint_config structure and fills in the information from
* function arguments. Calls _rmnet_set_logical_endpoint_config() to finish
* configuration. Network device must already have association with RmNet Data
* driver
*
* Return:
* - RMNET_CONFIG_OK if successful
* - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null
* - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by
* RmNet data module
* - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
* - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
* - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
*/
int rmnet_set_logical_endpoint_config(struct net_device *dev,
int config_id,
uint8_t rmnet_mode,
struct net_device *egress_dev)
{
struct rmnet_logical_ep_conf_s epconfig;
LOGL("%s(%s, %d, %d, %s);",
__func__, dev->name, config_id, rmnet_mode, egress_dev->name);
if (!egress_dev
|| ((!_rmnet_is_physical_endpoint_associated(egress_dev))
&& (!rmnet_vnd_is_vnd(egress_dev)))) {
return RMNET_CONFIG_BAD_EGRESS_DEVICE;
}
memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s));
epconfig.refcount = 1;
epconfig.rmnet_mode = rmnet_mode;
epconfig.egress_dev = egress_dev;
return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig);
}
/**
* rmnet_create_vnd() - Create virtual network device node
* @id: RmNet virtual device node id
*
* Return:
* - result of rmnet_vnd_create_dev()
*/
int rmnet_create_vnd(int id)
{
struct net_device *dev;
ASSERT_RTNL();
LOGL("%s(%d);", __func__, id);
return rmnet_vnd_create_dev(id, &dev);
}
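/*
 * Illustrative sketch (not part of the original driver): one possible way the
 * configuration API above can be composed for a physical device. The format
 * bitmasks, aggregation bounds and the endpoint mode value are placeholders;
 * real values come from the rmnet_data UAPI definitions.
 */
#if 0 /* example only, never compiled */
static int rmnet_config_example(struct net_device *phys_dev,
uint32_t ingress_fmt, uint32_t egress_fmt,
uint8_t mode /* value from rmnet_config_endpoint_modes_e */)
{
int rc;
ASSERT_RTNL();
rc = rmnet_associate_network_device(phys_dev);
if (rc != RMNET_CONFIG_OK)
return rc;
rc = rmnet_set_ingress_data_format(phys_dev, ingress_fmt);
if (rc != RMNET_CONFIG_OK)
return rc;
/* 4096/16 are placeholder aggregation size/count values */
rc = rmnet_set_egress_data_format(phys_dev, egress_fmt, 4096, 16);
if (rc != RMNET_CONFIG_OK)
return rc;
/* configure the device's local endpoint, egressing back out of itself */
return rmnet_set_logical_endpoint_config(phys_dev,
RMNET_LOCAL_LOGICAL_ENDPOINT, mode, phys_dev);
}
#endif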
| gpl-2.0 |
pawo99/stock_mm_dpgw | drivers/gpu/arm/mali400/mali/common/mali_gp_scheduler.c | 255 | 14036 | /*
* Copyright (C) 2011-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_gp_scheduler.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_scheduler.h"
#include "mali_gp.h"
#include "mali_gp_job.h"
#include "mali_group.h"
#include "mali_pm.h"
enum mali_gp_slot_state
{
MALI_GP_SLOT_STATE_IDLE,
MALI_GP_SLOT_STATE_WORKING,
};
/* A render slot is an entity which jobs can be scheduled onto */
struct mali_gp_slot
{
struct mali_group *group;
/*
* We keep track of the state here as well as in the group object
* so we don't need to take the group lock so often (and also avoid clutter with the working lock)
*/
enum mali_gp_slot_state state;
u32 returned_cookie;
};
static u32 gp_version = 0;
static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
static struct mali_gp_slot slot;
/* Variables to allow safe pausing of the scheduler */
static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
static u32 pause_count = 0;
static mali_bool mali_gp_scheduler_is_suspended(void);
static _mali_osk_lock_t *gp_scheduler_lock = NULL;
/* Contains tid of thread that locked the scheduler or 0, if not locked */
_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
{
u32 num_groups;
u32 i;
_MALI_OSK_INIT_LIST_HEAD(&job_queue);
gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
if (NULL == gp_scheduler_lock)
{
return _MALI_OSK_ERR_NOMEM;
}
gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
if (NULL == gp_scheduler_working_wait_queue)
{
_mali_osk_lock_term(gp_scheduler_lock);
return _MALI_OSK_ERR_NOMEM;
}
/* Find all the available GP cores */
num_groups = mali_group_get_glob_num_groups();
for (i = 0; i < num_groups; i++)
{
struct mali_group *group = mali_group_get_glob_group(i);
struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
if (NULL != gp_core)
{
if (0 == gp_version)
{
/* Retrieve GP version */
gp_version = mali_gp_core_get_version(gp_core);
}
slot.group = group;
slot.state = MALI_GP_SLOT_STATE_IDLE;
break; /* There is only one GP, no point in looking for more */
}
}
return _MALI_OSK_ERR_OK;
}
void mali_gp_scheduler_terminate(void)
{
MALI_DEBUG_ASSERT(MALI_GP_SLOT_STATE_IDLE == slot.state);
MALI_DEBUG_ASSERT_POINTER(slot.group);
mali_group_delete(slot.group);
_mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
_mali_osk_lock_term(gp_scheduler_lock);
}
MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
{
if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
{
/* Non-interruptible lock failed: this should never happen. */
MALI_DEBUG_ASSERT(0);
}
MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
}
MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
{
MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
_mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
}
#ifdef DEBUG
MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
{
MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
}
#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
#else
#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
#endif
static void mali_gp_scheduler_schedule(void)
{
struct mali_gp_job *job;
mali_gp_scheduler_lock();
if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
{
MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
mali_gp_scheduler_unlock();
return; /* Nothing to do, so early out */
}
/* Get (and remove) next job in queue */
job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
_mali_osk_list_del(&job->list);
/* Mark slot as busy */
slot.state = MALI_GP_SLOT_STATE_WORKING;
mali_gp_scheduler_unlock();
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
mali_group_lock(slot.group);
if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job))
{
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
MALI_DEBUG_ASSERT(0); /* @@@@ todo: this can't fail on Mali-300+, no need to implement put back of job */
}
mali_group_unlock(slot.group);
}
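/*
 * Locking note (added for clarity): mali_gp_scheduler_schedule() above only
 * holds the scheduler lock while it dequeues a job and marks the slot busy,
 * and drops it before taking the group lock to actually start the job. The
 * _on_group() variant below is instead entered with both the group lock and
 * the scheduler lock already held, and it releases the scheduler lock itself
 * before starting the job on the group.
 */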
/* @@@@ todo: pass the job in as a param to this function, so that we don't have to take the scheduler lock again */
static void mali_gp_scheduler_schedule_on_group(struct mali_group *group)
{
struct mali_gp_job *job;
MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
{
mali_gp_scheduler_unlock();
MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
return; /* Nothing to do, so early out */
}
/* Get (and remove) next job in queue */
job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
_mali_osk_list_del(&job->list);
/* Mark slot as busy */
slot.state = MALI_GP_SLOT_STATE_WORKING;
mali_gp_scheduler_unlock();
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
if (_MALI_OSK_ERR_OK != mali_group_start_gp_job(slot.group, job))
{
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
MALI_DEBUG_ASSERT(0); /* @@@@ todo: this can't fail on Mali-300+, no need to implement put back of job */
}
}
static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
{
_mali_uk_gp_job_finished_s *jobres = job->finished_notification->result_buffer;
_mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
jobres->user_job_ptr = mali_gp_job_get_user_id(job);
if (MALI_TRUE == success)
{
jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
}
else
{
jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
}
jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
mali_session_send_notification(mali_gp_job_get_session(job), job->finished_notification);
job->finished_notification = NULL;
mali_gp_job_delete(job);
}
void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
{
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
mali_gp_scheduler_return_job_to_user(job, success);
mali_gp_scheduler_lock();
/* Mark slot as idle again */
slot.state = MALI_GP_SLOT_STATE_IDLE;
/* If paused, then this was the last job, so wake up sleeping workers */
if (pause_count > 0)
{
_mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
}
mali_gp_scheduler_schedule_on_group(group);
/* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
mali_pm_core_event(MALI_CORE_EVENT_GP_STOP);
}
void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
{
_mali_uk_gp_job_suspended_s * jobres;
_mali_osk_notification_t * notification;
mali_gp_scheduler_lock();
notification = job->oom_notification;
job->oom_notification = NULL;
slot.returned_cookie = mali_gp_job_get_id(job);
jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
jobres->user_job_ptr = mali_gp_job_get_user_id(job);
jobres->cookie = mali_gp_job_get_id(job);
mali_gp_scheduler_unlock();
jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
mali_session_send_notification(mali_gp_job_get_session(job), notification);
/*
* If this function failed, then we could return the job to user space right away,
* but there is a job timer anyway that will do that eventually.
* This is not exactly a common case anyway.
*/
}
void mali_gp_scheduler_suspend(void)
{
mali_gp_scheduler_lock();
pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
mali_gp_scheduler_unlock();
_mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
}
void mali_gp_scheduler_resume(void)
{
mali_gp_scheduler_lock();
pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
mali_gp_scheduler_unlock();
if (0 == pause_count)
{
mali_gp_scheduler_schedule();
}
}
_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx, _mali_uk_gp_start_job_s *uargs)
{
struct mali_session_data *session;
struct mali_gp_job *job;
MALI_DEBUG_ASSERT_POINTER(uargs);
MALI_DEBUG_ASSERT_POINTER(ctx);
session = (struct mali_session_data*)ctx;
job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id());
if (NULL == job)
{
return _MALI_OSK_ERR_NOMEM;
}
#if PROFILING_SKIP_PP_AND_GP_JOBS
#warning GP jobs will not be executed
mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
return _MALI_OSK_ERR_OK;
#endif
mali_pm_core_event(MALI_CORE_EVENT_GP_START);
mali_gp_scheduler_lock();
_mali_osk_list_addtail(&job->list, &job_queue);
mali_gp_scheduler_unlock();
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
mali_gp_scheduler_schedule();
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
args->number_of_cores = 1;
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
{
MALI_DEBUG_ASSERT_POINTER(args);
MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
args->version = gp_version;
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
{
struct mali_session_data *session;
struct mali_gp_job *resumed_job;
_mali_osk_notification_t *new_notification = 0;
MALI_DEBUG_ASSERT_POINTER(args);
if (NULL == args->ctx)
{
return _MALI_OSK_ERR_INVALID_ARGS;
}
session = (struct mali_session_data*)args->ctx;
if (NULL == session)
{
return _MALI_OSK_ERR_FAULT;
}
if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
{
new_notification = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
if (NULL == new_notification)
{
MALI_PRINT_ERROR(("Mali GP scheduler: Failed to allocate notification object. Will abort GP job.\n"));
mali_group_lock(slot.group);
mali_group_abort_gp_job(slot.group, args->cookie);
mali_group_unlock(slot.group);
return _MALI_OSK_ERR_FAULT;
}
}
mali_group_lock(slot.group);
if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code)
{
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
resumed_job = mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
if (NULL != resumed_job)
{
/* @@@@ todo: move this and other notification handling into the job object itself */
resumed_job->oom_notification = new_notification;
mali_group_unlock(slot.group);
return _MALI_OSK_ERR_OK;
}
else
{
mali_group_unlock(slot.group);
_mali_osk_notification_delete(new_notification);
return _MALI_OSK_ERR_FAULT;
}
}
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
mali_group_abort_gp_job(slot.group, args->cookie);
mali_group_unlock(slot.group);
return _MALI_OSK_ERR_OK;
}
void mali_gp_scheduler_abort_session(struct mali_session_data *session)
{
struct mali_gp_job *job, *tmp;
mali_gp_scheduler_lock();
MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session));
/* Check queue for jobs and remove */
_MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list)
{
if (mali_gp_job_get_session(job) == session)
{
MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job));
_mali_osk_list_del(&(job->list));
mali_gp_job_delete(job);
mali_pm_core_event(MALI_CORE_EVENT_GP_STOP);
}
}
mali_gp_scheduler_unlock();
mali_group_abort_session(slot.group, session);
}
static mali_bool mali_gp_scheduler_is_suspended(void)
{
mali_bool ret;
mali_gp_scheduler_lock();
ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE;
mali_gp_scheduler_unlock();
return ret;
}
#if MALI_STATE_TRACKING
u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
{
int n = 0;
n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
n += mali_group_dump_state(slot.group, buf + n, size - n);
n += _mali_osk_snprintf(buf + n, size - n, "\n");
return n;
}
#endif
void mali_gp_scheduler_reset_all_groups(void)
{
if (NULL != slot.group)
{
mali_group_reset(slot.group);
}
}
void mali_gp_scheduler_zap_all_active(struct mali_session_data *session)
{
if (NULL != slot.group)
{
mali_group_zap_session(slot.group, session);
}
}
| gpl-2.0 |
c313742678/qt210_kernel | drivers/gpu/vga/vgaarb.c | 767 | 33002 | /*
* vgaarb.c: Implements the VGA arbitration. For details refer to
* Documentation/vgaarbiter.txt
*
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/poll.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vgaarb.h>
static void vga_arbiter_notify_clients(void);
/*
* We keep a list of all vga devices in the system to speed
* up the various operations of the arbiter
*/
struct vga_device {
struct list_head list;
struct pci_dev *pdev;
unsigned int decodes; /* what it decodes */
unsigned int owns; /* what it owns */
unsigned int locks; /* what it locks */
unsigned int io_lock_cnt; /* legacy IO lock count */
unsigned int mem_lock_cnt; /* legacy MEM lock count */
unsigned int io_norm_cnt; /* normal IO count */
unsigned int mem_norm_cnt; /* normal MEM count */
/* allow IRQ enable/disable hook */
void *cookie;
void (*irq_set_state)(void *cookie, bool enable);
unsigned int (*set_vga_decode)(void *cookie, bool decode);
};
static LIST_HEAD(vga_list);
static int vga_count, vga_decode_count;
static bool vga_arbiter_used;
static DEFINE_SPINLOCK(vga_lock);
static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
static const char *vga_iostate_to_str(unsigned int iostate)
{
/* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
switch (iostate) {
case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
return "io+mem";
case VGA_RSRC_LEGACY_IO:
return "io";
case VGA_RSRC_LEGACY_MEM:
return "mem";
}
return "none";
}
static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
{
/* we could in theory hand out locks on IO and mem
* separately to userspace but it can cause deadlocks */
if (strncmp(buf, "none", 4) == 0) {
*io_state = VGA_RSRC_NONE;
return 1;
}
/* XXX We're not checking the str_size! */
if (strncmp(buf, "io+mem", 6) == 0)
goto both;
else if (strncmp(buf, "io", 2) == 0)
goto both;
else if (strncmp(buf, "mem", 3) == 0)
goto both;
return 0;
both:
*io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
return 1;
}
#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
/* this is only used as a cookie - it should not be dereferenced */
static struct pci_dev *vga_default;
#endif
static void vga_arb_device_card_gone(struct pci_dev *pdev);
/* Find somebody in our list */
static struct vga_device *vgadev_find(struct pci_dev *pdev)
{
struct vga_device *vgadev;
list_for_each_entry(vgadev, &vga_list, list)
if (pdev == vgadev->pdev)
return vgadev;
return NULL;
}
/* Returns the default VGA device (vgacon's babe) */
#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
struct pci_dev *vga_default_device(void)
{
return vga_default;
}
#endif
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
if (vgadev->irq_set_state)
vgadev->irq_set_state(vgadev->cookie, state);
}
/* If we don't ever use VGA arb we should avoid
turning off anything anywhere due to old X servers getting
confused about the boot device not being VGA */
static void vga_check_first_use(void)
{
/* we should inform all GPUs in the system that
* VGA arb has occurred and to try and disable resources
* if they can */
if (!vga_arbiter_used) {
vga_arbiter_used = true;
vga_arbiter_notify_clients();
}
}
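/*
 * Descriptive note (added for clarity): __vga_tryget() below must be called
 * with vga_lock held. It returns NULL when the requested resources were
 * acquired (lock counts updated and any conflicting owners disabled), or a
 * pointer to the conflicting vga_device that currently holds a lock we need,
 * in which case the caller has to wait and retry.
 */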
static struct vga_device *__vga_tryget(struct vga_device *vgadev,
unsigned int rsrc)
{
unsigned int wants, legacy_wants, match;
struct vga_device *conflict;
unsigned int pci_bits;
/* Account for "normal" resources to lock. If we decode the legacy,
* counterpart, we need to request it as well
*/
if ((rsrc & VGA_RSRC_NORMAL_IO) &&
(vgadev->decodes & VGA_RSRC_LEGACY_IO))
rsrc |= VGA_RSRC_LEGACY_IO;
if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
(vgadev->decodes & VGA_RSRC_LEGACY_MEM))
rsrc |= VGA_RSRC_LEGACY_MEM;
pr_debug("%s: %d\n", __func__, rsrc);
pr_debug("%s: owns: %d\n", __func__, vgadev->owns);
/* Check what resources we need to acquire */
wants = rsrc & ~vgadev->owns;
/* We already own everything, just mark locked & bye bye */
if (wants == 0)
goto lock_them;
/* We don't need to request a legacy resource, we just enable
* appropriate decoding and go
*/
legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
if (legacy_wants == 0)
goto enable_them;
/* Ok, we don't, let's find out how we need to kick off */
list_for_each_entry(conflict, &vga_list, list) {
unsigned int lwants = legacy_wants;
unsigned int change_bridge = 0;
/* Don't conflict with myself */
if (vgadev == conflict)
continue;
/* Check if the architecture allows a conflict between those
* 2 devices or if they are on separate domains
*/
if (!vga_conflicts(vgadev->pdev, conflict->pdev))
continue;
/* We have a possible conflict. before we go further, we must
* check if we sit on the same bus as the conflicting device.
* if we don't, then we must tie both IO and MEM resources
* together since there is only a single bit controlling
* VGA forwarding on P2P bridges
*/
if (vgadev->pdev->bus != conflict->pdev->bus) {
change_bridge = 1;
lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
}
/* Check if the guy has a lock on the resource. If he does,
* return the conflicting entry
*/
if (conflict->locks & lwants)
return conflict;
/* Ok, now check if he owns the resource we want. We don't need
* to check "decodes" since it should be impossible to own
* legacy resources you don't decode unless I have a bug
* in this code...
*/
WARN_ON(conflict->owns & ~conflict->decodes);
match = lwants & conflict->owns;
if (!match)
continue;
/* looks like he doesn't have a lock, we can steal
* them from him
*/
vga_irq_set_state(conflict, false);
pci_bits = 0;
if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
pci_set_vga_state(conflict->pdev, false, pci_bits,
change_bridge);
conflict->owns &= ~lwants;
/* If he also owned non-legacy, that is no longer the case */
if (lwants & VGA_RSRC_LEGACY_MEM)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
if (lwants & VGA_RSRC_LEGACY_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
enable_them:
/* ok dude, we got it, everybody conflicting has been disabled, let's
* enable us. Make sure we don't mark a bit in "owns" that we don't
* also have in "decodes". We can lock resources we don't decode but
* not own them.
*/
pci_bits = 0;
if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
pci_bits |= PCI_COMMAND_MEMORY;
if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
vga_irq_set_state(vgadev, true);
vgadev->owns |= (wants & vgadev->decodes);
lock_them:
vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
if (rsrc & VGA_RSRC_LEGACY_IO)
vgadev->io_lock_cnt++;
if (rsrc & VGA_RSRC_LEGACY_MEM)
vgadev->mem_lock_cnt++;
if (rsrc & VGA_RSRC_NORMAL_IO)
vgadev->io_norm_cnt++;
if (rsrc & VGA_RSRC_NORMAL_MEM)
vgadev->mem_norm_cnt++;
return NULL;
}
static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
{
unsigned int old_locks = vgadev->locks;
pr_debug("%s\n", __func__);
/* Update our counters, and account for equivalent legacy resources
* if we decode them
*/
if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
vgadev->io_norm_cnt--;
if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
rsrc |= VGA_RSRC_LEGACY_IO;
}
if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
vgadev->mem_norm_cnt--;
if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
rsrc |= VGA_RSRC_LEGACY_MEM;
}
if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
vgadev->io_lock_cnt--;
if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
vgadev->mem_lock_cnt--;
/* Just clear lock bits, we do lazy operations so we don't really
* have to bother about anything else at this point
*/
if (vgadev->io_lock_cnt == 0)
vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
if (vgadev->mem_lock_cnt == 0)
vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
/* Kick the wait queue in case somebody was waiting if we actually
* released something
*/
if (old_locks != vgadev->locks)
wake_up_all(&vga_wait_queue);
}
int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
{
struct vga_device *vgadev, *conflict;
unsigned long flags;
wait_queue_t wait;
int rc = 0;
vga_check_first_use();
/* The one who calls us should check for this, but let's be sure... */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
return 0;
for (;;) {
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (vgadev == NULL) {
spin_unlock_irqrestore(&vga_lock, flags);
rc = -ENODEV;
break;
}
conflict = __vga_tryget(vgadev, rsrc);
spin_unlock_irqrestore(&vga_lock, flags);
if (conflict == NULL)
break;
/* We have a conflict, we wait until somebody kicks the
* work queue. Currently we have one work queue that we
* kick each time some resources are released, but it would
* be fairly easy to have a per device one so that we only
* need to attach to the conflicting device
*/
init_waitqueue_entry(&wait, current);
add_wait_queue(&vga_wait_queue, &wait);
set_current_state(interruptible ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (signal_pending(current)) {
/* don't leave the stack-allocated wait entry queued or the task state changed */
set_current_state(TASK_RUNNING);
remove_wait_queue(&vga_wait_queue, &wait);
rc = -EINTR;
break;
}
schedule();
remove_wait_queue(&vga_wait_queue, &wait);
set_current_state(TASK_RUNNING);
}
return rc;
}
EXPORT_SYMBOL(vga_get);
int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
unsigned long flags;
int rc = 0;
vga_check_first_use();
/* The one who calls us should check for this, but let's be sure... */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
return 0;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (vgadev == NULL) {
rc = -ENODEV;
goto bail;
}
if (__vga_tryget(vgadev, rsrc))
rc = -EBUSY;
bail:
spin_unlock_irqrestore(&vga_lock, flags);
return rc;
}
EXPORT_SYMBOL(vga_tryget);
void vga_put(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
unsigned long flags;
/* The one who calls us should check for this, but let's be sure... */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
return;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (vgadev == NULL)
goto bail;
__vga_put(vgadev, rsrc);
bail:
spin_unlock_irqrestore(&vga_lock, flags);
}
EXPORT_SYMBOL(vga_put);
/*
* Currently, we assume that the "initial" setup of the system is
* not sane, that is we come up with conflicting devices and let
* the arbiter's clients decide whether or not devices decode legacy
* things.
*/
static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
{
struct vga_device *vgadev;
unsigned long flags;
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
/* Only deal with VGA class devices */
if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
return false;
/* Allocate structure */
vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
pr_err("vgaarb: failed to allocate pci device\n");
/* What to do on allocation failure ? For now, let's
* just do nothing, I'm not sure there is anything saner
* to be done
*/
return false;
}
/* Take lock & check for duplicates */
spin_lock_irqsave(&vga_lock, flags);
if (vgadev_find(pdev) != NULL) {
BUG_ON(1);
goto fail;
}
vgadev->pdev = pdev;
/* By default, assume we decode everything */
vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
/* by default mark it as decoding */
vga_decode_count++;
/* Mark that we "own" resources based on our enables, we will
* clear that below if the bridge isn't forwarding
*/
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_IO)
vgadev->owns |= VGA_RSRC_LEGACY_IO;
if (cmd & PCI_COMMAND_MEMORY)
vgadev->owns |= VGA_RSRC_LEGACY_MEM;
/* Check if VGA cycles can get down to us */
bus = pdev->bus;
while (bus) {
bridge = bus->self;
if (bridge) {
u16 l;
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&l);
if (!(l & PCI_BRIDGE_CTL_VGA)) {
vgadev->owns = 0;
break;
}
}
bus = bus->parent;
}
/* Deal with VGA default device. Use first enabled one
* by default if arch doesn't have its own hook
*/
#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
if (vga_default == NULL &&
((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
vga_default = pci_dev_get(pdev);
#endif
/* Add to the list */
list_add(&vgadev->list, &vga_list);
vga_count++;
pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
pci_name(pdev),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
vga_iostate_to_str(vgadev->locks));
spin_unlock_irqrestore(&vga_lock, flags);
return true;
fail:
spin_unlock_irqrestore(&vga_lock, flags);
kfree(vgadev);
return false;
}
static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
{
struct vga_device *vgadev;
unsigned long flags;
bool ret = true;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (vgadev == NULL) {
ret = false;
goto bail;
}
if (vga_default == pdev) {
pci_dev_put(vga_default);
vga_default = NULL;
}
if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count--;
/* Remove entry from list */
list_del(&vgadev->list);
vga_count--;
/* Notify userland driver that the device is gone so it discards
* its copies of the pci_dev pointer
*/
vga_arb_device_card_gone(pdev);
/* Wake up all possible waiters */
wake_up_all(&vga_wait_queue);
bail:
spin_unlock_irqrestore(&vga_lock, flags);
kfree(vgadev);
return ret;
}
/* this is called with the lock */
static inline void vga_update_device_decodes(struct vga_device *vgadev,
int new_decodes)
{
int old_decodes;
struct vga_device *new_vgadev, *conflict;
old_decodes = vgadev->decodes;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
pci_name(vgadev->pdev),
vga_iostate_to_str(old_decodes),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns));
/* if we own the decodes we should move them along to
another card */
if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
/* set us to own nothing */
vgadev->owns &= ~old_decodes;
list_for_each_entry(new_vgadev, &vga_list, list) {
if ((new_vgadev != vgadev) &&
(new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
if (!conflict)
__vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
break;
}
}
}
/* change decodes counter */
if (old_decodes != new_decodes) {
if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
vga_decode_count++;
else
vga_decode_count--;
}
pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
}
void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
{
struct vga_device *vgadev;
unsigned long flags;
decodes &= VGA_RSRC_LEGACY_MASK;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (vgadev == NULL)
goto bail;
/* don't let userspace futz with kernel driver decodes */
if (userspace && vgadev->set_vga_decode)
goto bail;
/* update the device decodes + counter */
vga_update_device_decodes(vgadev, decodes);
/* XXX if somebody is going from "doesn't decode" to "decodes" state
* here, additional care must be taken as we may have pending
* ownership of the non-legacy region ...
*/
bail:
spin_unlock_irqrestore(&vga_lock, flags);
}
void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
{
__vga_set_legacy_decoding(pdev, decodes, false);
}
EXPORT_SYMBOL(vga_set_legacy_decoding);
/* call with NULL to unregister */
int vga_client_register(struct pci_dev *pdev, void *cookie,
void (*irq_set_state)(void *cookie, bool state),
unsigned int (*set_vga_decode)(void *cookie, bool decode))
{
int ret = -1;
struct vga_device *vgadev;
unsigned long flags;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
if (!vgadev)
goto bail;
vgadev->irq_set_state = irq_set_state;
vgadev->set_vga_decode = set_vga_decode;
vgadev->cookie = cookie;
ret = 0;
bail:
spin_unlock_irqrestore(&vga_lock, flags);
return ret;
}
EXPORT_SYMBOL(vga_client_register);
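/*
 * Illustrative usage sketch (not from the original file): a GPU driver that
 * can disable its legacy VGA decodes would typically register a callback
 * like the following from its probe path. The "mydrv" names are
 * hypothetical placeholders.
 *
 *	static unsigned int mydrv_vga_set_decode(void *cookie, bool state)
 *	{
 *		// program the hardware to stop/start decoding legacy VGA ranges
 *		return state ? VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 *			       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM
 *			     : VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 *	}
 *
 *	vga_client_register(pdev, mydrv_priv, NULL, mydrv_vga_set_decode);
 */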
/*
* Char driver implementation
*
* Semantics is:
*
* open : open user instance of the arbiter. By default, it's
* attached to the default VGA device of the system.
*
* close : close user instance, release locks
*
* read : return a string indicating the status of the target.
* an IO state string is of the form {io,mem,io+mem,none},
* mc and ic are respectively mem and io lock counts (for
* debugging/diagnostic only). "decodes" indicate what the
* card currently decodes, "owns" indicates what is currently
* enabled on it, and "locks" indicates what is locked by this
* card. If the card is unplugged, we get "invalid" then for
* card_ID and an -ENODEV error is returned for any command
* until a new card is targeted
*
* "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
*
* write : write a command to the arbiter. List of commands is:
*
* target <card_ID> : switch target to card <card_ID> (see below)
* lock <io_state> : acquires locks on target ("none" is invalid io_state)
* trylock <io_state> : non-blocking acquire locks on target
* unlock <io_state> : release locks on target
* unlock all : release all locks on target held by this user
* decodes <io_state> : set the legacy decoding attributes for the card
*
* poll : event if something changes on any card (not just the target)
*
* card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
* to go back to the system default card (TODO: not implemented yet).
* Currently, only PCI is supported as a prefix, but the userland API may
* support other bus types in the future, even if the current kernel
* implementation doesn't.
*
* Note about locks:
*
* The driver keeps track of which user has what locks on which card. It
* supports stacking, like the kernel one. This complexifies the implementation
* a bit, but makes the arbiter more tolerant to userspace problems and able
* to properly cleanup in all cases when a process dies.
* Currently, a max of 16 cards simultaneously can have locks issued from
* userspace for a given user (file descriptor instance) of the arbiter.
*
* If a device is hot-(un)plugged, there is a hook inside the module that is
* notified of devices being added to/removed from the system, so that they
* are automatically added to/removed from the arbiter as well.
*/
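/*
 * Example session (illustrative only; the PCI address below is a
 * placeholder):
 *
 *	fd = open("/dev/vga_arbiter", O_RDWR);
 *	write(fd, "target PCI:0000:01:00.0", 23);
 *	write(fd, "lock io+mem", 11);	// may block until the resources are free
 *	... touch legacy VGA registers ...
 *	write(fd, "unlock io+mem", 13);
 *	read(fd, buf, sizeof(buf));	// "count:...,PCI:...,decodes=...,owns=...,locks=..."
 *	close(fd);
 */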
#define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS
#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
/*
* Each user has an array of these, tracking which cards have locks
*/
struct vga_arb_user_card {
struct pci_dev *pdev;
unsigned int mem_cnt;
unsigned int io_cnt;
};
struct vga_arb_private {
struct list_head list;
struct pci_dev *target;
struct vga_arb_user_card cards[MAX_USER_CARDS];
spinlock_t lock;
};
static LIST_HEAD(vga_user_list);
static DEFINE_SPINLOCK(vga_user_lock);
/*
* This function gets a string in the format: "PCI:domain:bus:dev.fn" and
* returns the respective values. If the string is not in this format,
* it returns 0.
*/
static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
unsigned int *bus, unsigned int *devfn)
{
int n;
unsigned int slot, func;
n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
if (n != 4)
return 0;
*devfn = PCI_DEVFN(slot, func);
return 1;
}
static ssize_t vga_arb_read(struct file *file, char __user * buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
struct vga_device *vgadev;
struct pci_dev *pdev;
unsigned long flags;
size_t len;
int rc;
char *lbuf;
lbuf = kmalloc(1024, GFP_KERNEL);
if (lbuf == NULL)
return -ENOMEM;
/* Shields against vga_arb_device_card_gone (pci_dev going
* away), and allows access to vga list
*/
spin_lock_irqsave(&vga_lock, flags);
/* If we are targeting the default, use it */
pdev = priv->target;
if (pdev == NULL || pdev == PCI_INVALID_CARD) {
spin_unlock_irqrestore(&vga_lock, flags);
len = sprintf(lbuf, "invalid");
goto done;
}
/* Find card vgadev structure */
vgadev = vgadev_find(pdev);
if (vgadev == NULL) {
/* Wow, it's not in the list, that shouldn't happen,
* let's fix us up and return invalid card
*/
if (pdev == priv->target)
vga_arb_device_card_gone(pdev);
spin_unlock_irqrestore(&vga_lock, flags);
len = sprintf(lbuf, "invalid");
goto done;
}
/* Fill the buffer with infos */
len = snprintf(lbuf, 1024,
"count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
vga_decode_count, pci_name(pdev),
vga_iostate_to_str(vgadev->decodes),
vga_iostate_to_str(vgadev->owns),
vga_iostate_to_str(vgadev->locks),
vgadev->io_lock_cnt, vgadev->mem_lock_cnt);
spin_unlock_irqrestore(&vga_lock, flags);
done:
/* Copy that to user */
if (len > count)
len = count;
rc = copy_to_user(buf, lbuf, len);
kfree(lbuf);
if (rc)
return -EFAULT;
return len;
}
/*
* TODO: To avoid parsing inside kernel and to improve the speed we may
* consider using an ioctl here
*/
static ssize_t vga_arb_write(struct file *file, const char __user * buf,
size_t count, loff_t *ppos)
{
struct vga_arb_private *priv = file->private_data;
struct vga_arb_user_card *uc = NULL;
struct pci_dev *pdev;
unsigned int io_state;
char *kbuf, *curr_pos;
size_t remaining = count;
int ret_val;
int i;
kbuf = kmalloc(count + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
if (copy_from_user(kbuf, buf, count)) {
kfree(kbuf);
return -EFAULT;
}
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
if (strncmp(curr_pos, "lock ", 5) == 0) {
curr_pos += 5;
remaining -= 5;
pr_debug("client 0x%p called 'lock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
goto done;
}
if (io_state == VGA_RSRC_NONE) {
ret_val = -EPROTO;
goto done;
}
pdev = priv->target;
if (priv->target == NULL) {
ret_val = -ENODEV;
goto done;
}
vga_get_uninterruptible(pdev, io_state);
/* Update the client's locks lists... */
for (i = 0; i < MAX_USER_CARDS; i++) {
if (priv->cards[i].pdev == pdev) {
if (io_state & VGA_RSRC_LEGACY_IO)
priv->cards[i].io_cnt++;
if (io_state & VGA_RSRC_LEGACY_MEM)
priv->cards[i].mem_cnt++;
break;
}
}
ret_val = count;
goto done;
} else if (strncmp(curr_pos, "unlock ", 7) == 0) {
curr_pos += 7;
remaining -= 7;
pr_debug("client 0x%p called 'unlock'\n", priv);
if (strncmp(curr_pos, "all", 3) == 0)
io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
else {
if (!vga_str_to_iostate
(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
goto done;
}
/* TODO: Add this?
if (io_state == VGA_RSRC_NONE) {
ret_val = -EPROTO;
goto done;
}
*/
}
pdev = priv->target;
if (priv->target == NULL) {
ret_val = -ENODEV;
goto done;
}
for (i = 0; i < MAX_USER_CARDS; i++) {
if (priv->cards[i].pdev == pdev)
uc = &priv->cards[i];
}
if (!uc) {
ret_val = -EINVAL;
goto done;
}
if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
ret_val = -EINVAL;
goto done;
}
if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
ret_val = -EINVAL;
goto done;
}
vga_put(pdev, io_state);
if (io_state & VGA_RSRC_LEGACY_IO)
uc->io_cnt--;
if (io_state & VGA_RSRC_LEGACY_MEM)
uc->mem_cnt--;
ret_val = count;
goto done;
} else if (strncmp(curr_pos, "trylock ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
pr_debug("client 0x%p called 'trylock'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
goto done;
}
/* TODO: Add this?
if (io_state == VGA_RSRC_NONE) {
ret_val = -EPROTO;
goto done;
}
*/
pdev = priv->target;
if (priv->target == NULL) {
ret_val = -ENODEV;
goto done;
}
if (vga_tryget(pdev, io_state)) {
/* Update the client's locks lists... */
for (i = 0; i < MAX_USER_CARDS; i++) {
if (priv->cards[i].pdev == pdev) {
if (io_state & VGA_RSRC_LEGACY_IO)
priv->cards[i].io_cnt++;
if (io_state & VGA_RSRC_LEGACY_MEM)
priv->cards[i].mem_cnt++;
break;
}
}
ret_val = count;
goto done;
} else {
ret_val = -EBUSY;
goto done;
}
} else if (strncmp(curr_pos, "target ", 7) == 0) {
struct pci_bus *pbus;
unsigned int domain, bus, devfn;
struct vga_device *vgadev;
curr_pos += 7;
remaining -= 7;
pr_debug("client 0x%p called 'target'\n", priv);
/* if target is default */
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,
&domain, &bus, &devfn)) {
ret_val = -EPROTO;
goto done;
}
pr_debug("vgaarb: %s ==> %x:%x:%x.%x\n", curr_pos,
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
pbus = pci_find_bus(domain, bus);
pr_debug("vgaarb: pbus %p\n", pbus);
if (pbus == NULL) {
pr_err("vgaarb: invalid PCI domain and/or bus address %x:%x\n",
domain, bus);
ret_val = -ENODEV;
goto done;
}
pdev = pci_get_slot(pbus, devfn);
pr_debug("vgaarb: pdev %p\n", pdev);
if (!pdev) {
pr_err("vgaarb: invalid PCI address %x:%x\n",
bus, devfn);
ret_val = -ENODEV;
goto done;
}
}
vgadev = vgadev_find(pdev);
pr_debug("vgaarb: vgadev %p\n", vgadev);
if (vgadev == NULL) {
pr_err("vgaarb: this pci device is not a vga device\n");
pci_dev_put(pdev);
ret_val = -ENODEV;
goto done;
}
priv->target = pdev;
for (i = 0; i < MAX_USER_CARDS; i++) {
if (priv->cards[i].pdev == pdev)
break;
if (priv->cards[i].pdev == NULL) {
priv->cards[i].pdev = pdev;
priv->cards[i].io_cnt = 0;
priv->cards[i].mem_cnt = 0;
break;
}
}
if (i == MAX_USER_CARDS) {
pr_err("vgaarb: maximum user cards (%d) number reached!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
/* XXX: which value to return? */
ret_val = -ENOMEM;
goto done;
}
ret_val = count;
pci_dev_put(pdev);
goto done;
} else if (strncmp(curr_pos, "decodes ", 8) == 0) {
curr_pos += 8;
remaining -= 8;
pr_debug("vgaarb: client 0x%p called 'decodes'\n", priv);
if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
ret_val = -EPROTO;
goto done;
}
pdev = priv->target;
if (priv->target == NULL) {
ret_val = -ENODEV;
goto done;
}
__vga_set_legacy_decoding(pdev, io_state, true);
ret_val = count;
goto done;
}
/* If we got here, the message written is not part of the protocol! */
kfree(kbuf);
return -EPROTO;
done:
kfree(kbuf);
return ret_val;
}
static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
{
struct vga_arb_private *priv = file->private_data;
pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
poll_wait(file, &vga_wait_queue, wait);
return POLLIN;
}
static int vga_arb_open(struct inode *inode, struct file *file)
{
struct vga_arb_private *priv;
unsigned long flags;
pr_debug("%s\n", __func__);
priv = kzalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
spin_lock_init(&priv->lock);
file->private_data = priv;
spin_lock_irqsave(&vga_user_lock, flags);
list_add(&priv->list, &vga_user_list);
spin_unlock_irqrestore(&vga_user_lock, flags);
/* Set up the client's list of locks */
priv->target = vga_default_device(); /* Maybe this is still null! */
priv->cards[0].pdev = priv->target;
priv->cards[0].io_cnt = 0;
priv->cards[0].mem_cnt = 0;
return 0;
}
static int vga_arb_release(struct inode *inode, struct file *file)
{
struct vga_arb_private *priv = file->private_data;
struct vga_arb_user_card *uc;
unsigned long flags;
int i;
pr_debug("%s\n", __func__);
if (priv == NULL)
return -ENODEV;
spin_lock_irqsave(&vga_user_lock, flags);
list_del(&priv->list);
for (i = 0; i < MAX_USER_CARDS; i++) {
uc = &priv->cards[i];
if (uc->pdev == NULL)
continue;
pr_debug("uc->io_cnt == %d, uc->mem_cnt == %d\n",
uc->io_cnt, uc->mem_cnt);
while (uc->io_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
while (uc->mem_cnt--)
vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
}
spin_unlock_irqrestore(&vga_user_lock, flags);
kfree(priv);
return 0;
}
static void vga_arb_device_card_gone(struct pci_dev *pdev)
{
}
/*
* callback any registered clients to let them know we have a
* change in VGA cards
*/
static void vga_arbiter_notify_clients(void)
{
struct vga_device *vgadev;
unsigned long flags;
uint32_t new_decodes;
bool new_state;
if (!vga_arbiter_used)
return;
spin_lock_irqsave(&vga_lock, flags);
list_for_each_entry(vgadev, &vga_list, list) {
if (vga_count > 1)
new_state = false;
else
new_state = true;
if (vgadev->set_vga_decode) {
new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
vga_update_device_decodes(vgadev, new_decodes);
}
}
spin_unlock_irqrestore(&vga_lock, flags);
}
static int pci_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
struct pci_dev *pdev = to_pci_dev(dev);
bool notify = false;
pr_debug("%s\n", __func__);
/* For now we're only interested in devices added and removed. I didn't
* test this thing here, so someone needs to double check for the
* cases of hot-pluggable vga cards. */
if (action == BUS_NOTIFY_ADD_DEVICE)
notify = vga_arbiter_add_pci_device(pdev);
else if (action == BUS_NOTIFY_DEL_DEVICE)
notify = vga_arbiter_del_pci_device(pdev);
if (notify)
vga_arbiter_notify_clients();
return 0;
}
static struct notifier_block pci_notifier = {
.notifier_call = pci_notify,
};
static const struct file_operations vga_arb_device_fops = {
.read = vga_arb_read,
.write = vga_arb_write,
.poll = vga_arb_fpoll,
.open = vga_arb_open,
.release = vga_arb_release,
};
static struct miscdevice vga_arb_device = {
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
static int __init vga_arb_device_init(void)
{
int rc;
struct pci_dev *pdev;
rc = misc_register(&vga_arb_device);
if (rc < 0)
pr_err("vgaarb: error %d registering device\n", rc);
bus_register_notifier(&pci_bus_type, &pci_notifier);
/* We add all PCI devices satisfying the VGA class to the arbiter by
* default */
pdev = NULL;
while ((pdev =
pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_ANY_ID, pdev)) != NULL)
vga_arbiter_add_pci_device(pdev);
pr_info("vgaarb: loaded\n");
return rc;
}
subsys_initcall(vga_arb_device_init);
| gpl-2.0 |
KOala888/GB_kernel | drivers/staging/rtl8192su/ieee80211/ieee80211_crypt_wep.c | 767 | 7342 | /*
* Host AP crypt: host-based WEP encryption implementation for Host AP driver
*
* Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*/
//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/string.h>
#include "ieee80211.h"
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: WEP");
MODULE_LICENSE("GPL");
struct prism2_wep_data {
u32 iv;
#define WEP_KEY_LEN 13
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
struct crypto_blkcipher *tx_tfm;
struct crypto_blkcipher *rx_tfm;
};
static void * prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
"crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
"crypto API arc4\n");
priv->rx_tfm = NULL;
goto fail;
}
/* start WEP IV from a random value */
get_random_bytes(&priv->iv, 4);
return priv;
fail:
if (priv) {
if (priv->tx_tfm)
crypto_free_blkcipher(priv->tx_tfm);
if (priv->rx_tfm)
crypto_free_blkcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
}
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
if (_priv) {
if (_priv->tx_tfm)
crypto_free_blkcipher(_priv->tx_tfm);
if (_priv->rx_tfm)
crypto_free_blkcipher(_priv->rx_tfm);
}
kfree(priv);
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
* for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
* so the payload length increases by 8 bytes.
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct prism2_wep_data *wep = priv;
u32 klen, len;
u8 key[WEP_KEY_LEN + 3];
u8 *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
u32 crc;
u8 *icv;
struct scatterlist sg;
if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
return -1;
len = skb->len - hdr_len;
pos = skb_push(skb, 4);
memmove(pos, pos + 4, hdr_len);
pos += hdr_len;
klen = 3 + wep->key_len;
wep->iv++;
/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
* scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
* can be used to speedup attacks, so avoid using them. */
if ((wep->iv & 0xff00) == 0xff00) {
u8 B = (wep->iv >> 16) & 0xff;
if (B >= 3 && B < klen)
wep->iv += 0x0100;
}
/* Prepend 24-bit IV to RC4 key and TX frame */
*pos++ = key[0] = (wep->iv >> 16) & 0xff;
*pos++ = key[1] = (wep->iv >> 8) & 0xff;
*pos++ = key[2] = wep->iv & 0xff;
*pos++ = wep->key_idx << 6;
/* Copy rest of the WEP key (the secret part) */
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec)
{
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
icv = skb_put(skb, 4);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
return 0;
}
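/*
 * On-air layout produced by the software path of prism2_wep_encrypt() above
 * (sizes in bytes):
 *
 *	| 802.11 hdr (hdr_len) | IV (3) | key idx << 6 (1) | RC4(payload + CRC32) (len + 4) |
 *
 * The 4-byte ICV is the little-endian CRC32 of the plaintext payload,
 * encrypted together with it under the per-frame RC4 key (IV || WEP key).
 */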
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). len includes both IV and ICV.
*
* Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
* failure. If frame is OK, IV and ICV will be removed.
*/
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct prism2_wep_data *wep = priv;
u32 klen, plen;
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
u32 crc;
u8 icv[4];
struct scatterlist sg;
if (skb->len < hdr_len + 8)
return -1;
pos = skb->data + hdr_len;
key[0] = *pos++;
key[1] = *pos++;
key[2] = *pos++;
keyidx = *pos++ >> 6;
if (keyidx != wep->key_idx)
return -1;
klen = 3 + wep->key_len;
/* Copy rest of the WEP key (the secret part) */
memcpy(key + 3, wep->key, wep->key_len);
/* Apply RC4 to data and compute CRC32 over decrypted data */
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec)
{
crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
return -7;
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
if (memcmp(icv, pos + plen, 4) != 0) {
/* ICV mismatch - drop frame */
return -2;
}
}
/* Remove IV and ICV */
memmove(skb->data + 4, skb->data, hdr_len);
skb_pull(skb, 4);
skb_trim(skb, skb->len - 4);
return 0;
}
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
if (len < 0 || len > WEP_KEY_LEN)
return -1;
memcpy(wep->key, key, len);
wep->key_len = len;
return 0;
}
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
struct prism2_wep_data *wep = priv;
if (len < wep->key_len)
return -1;
memcpy(key, wep->key, wep->key_len);
return wep->key_len;
}
static char * prism2_wep_print_stats(char *p, void *priv)
{
struct prism2_wep_data *wep = priv;
p += sprintf(p, "key[%d] alg=WEP len=%d\n",
wep->key_idx, wep->key_len);
return p;
}
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.name = "WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
.encrypt_mpdu = prism2_wep_encrypt,
.decrypt_mpdu = prism2_wep_decrypt,
.encrypt_msdu = NULL,
.decrypt_msdu = NULL,
.set_key = prism2_wep_set_key,
.get_key = prism2_wep_get_key,
.print_stats = prism2_wep_print_stats,
.extra_prefix_len = 4, /* IV */
.extra_postfix_len = 4, /* ICV */
.owner = THIS_MODULE,
};
int __init ieee80211_crypto_wep_init(void)
{
return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}
void __exit ieee80211_crypto_wep_exit(void)
{
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
void ieee80211_wep_null(void)
{
// printk("============>%s()\n", __FUNCTION__);
return;
}
| gpl-2.0 |
Outernet-Project/rpi-linux | drivers/tty/amiserial.c | 767 | 46980 | /*
* Serial driver for the amiga builtin port.
*
* This code was created by taking serial.c version 4.30 from kernel
* release 2.3.22, replacing all hardware related stuff with the
* corresponding amiga hardware actions, and removing all irrelevant
* code. As a consequence, it uses many of the constants and names
* associated with the registers and bits of 16550 compatible UARTS -
* but only to keep track of status, etc in the state variables. It
* was done this way to make it easier to keep the code in line with
* (non hardware specific) changes to serial.c.
*
* The port is registered with the tty driver as minor device 64, and
* therefore other ports should only use 65 upwards.
*
* Richard Lucock 28/12/99
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997,
* 1998, 1999 Theodore Ts'o
*
*/
/*
* Serial driver configuration section. Here are the various options:
*
* SERIAL_PARANOIA_CHECK
* Check the magic number for the async_structure wherever
* possible.
*/
#include <linux/delay.h>
#undef SERIAL_PARANOIA_CHECK
#define SERIAL_DO_RESTART
/* Set of debugging defines */
#undef SERIAL_DEBUG_INTR
#undef SERIAL_DEBUG_OPEN
#undef SERIAL_DEBUG_FLOW
#undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
/* Sanity checks */
#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
tty->name, (info->tport.flags), serial_driver->refcount,info->count,tty->count,s)
#else
#define DBG_CNT(s)
#endif
/*
* End of serial driver configuration section.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
static char *serial_version = "4.30";
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
struct serial_state {
struct tty_port tport;
struct circ_buf xmit;
struct async_icount icount;
unsigned long port;
int baud_base;
int xmit_fifo_size;
int custom_divisor;
int read_status_mask;
int ignore_status_mask;
int timeout;
int quot;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
int x_char; /* xon/xoff character */
};
#define custom amiga_custom
static char *serial_name = "Amiga-builtin serial driver";
static struct tty_driver *serial_driver;
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
static unsigned char current_ctl_bits;
static void change_speed(struct tty_struct *tty, struct serial_state *info,
struct ktermios *old);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
static struct serial_state rs_table[1];
#define NR_PORTS ARRAY_SIZE(rs_table)
#include <asm/uaccess.h>
#define serial_isroot() (capable(CAP_SYS_ADMIN))
static inline int serial_paranoia_check(struct serial_state *info,
char *name, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
static const char *badmagic =
"Warning: bad magic number for serial struct (%s) in %s\n";
static const char *badinfo =
"Warning: null async_struct for (%s) in %s\n";
if (!info) {
printk(badinfo, name, routine);
return 1;
}
if (info->magic != SERIAL_MAGIC) {
printk(badmagic, name, routine);
return 1;
}
#endif
return 0;
}
/* some serial hardware definitions */
#define SDR_OVRUN (1<<15)
#define SDR_RBF (1<<14)
#define SDR_TBE (1<<13)
#define SDR_TSRE (1<<12)
#define SERPER_PARENB (1<<15)
#define AC_SETCLR (1<<15)
#define AC_UARTBRK (1<<11)
#define SER_DTR (1<<7)
#define SER_RTS (1<<6)
#define SER_DCD (1<<5)
#define SER_CTS (1<<4)
#define SER_DSR (1<<3)
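/*
 * Note: the RTS/DTR outputs (and the DCD/CTS/DSR inputs) on CIA-B port A
 * are active low, so rtsdtr_ctrl() below inverts the requested RTS/DTR
 * bits (the XOR) while leaving all other bits of the port untouched.
 */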
static __inline__ void rtsdtr_ctrl(int bits)
{
ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR));
}
/*
* ------------------------------------------------------------
* rs_stop() and rs_start()
*
* These routines are called before setting or resetting tty->stopped.
* They enable or disable transmitter interrupts, as necessary.
* ------------------------------------------------------------
*/
static void rs_stop(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_stop"))
return;
local_irq_save(flags);
if (info->IER & UART_IER_THRI) {
info->IER &= ~UART_IER_THRI;
/* disable Tx interrupt and remove any pending interrupts */
custom.intena = IF_TBE;
mb();
custom.intreq = IF_TBE;
mb();
}
local_irq_restore(flags);
}
static void rs_start(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_start"))
return;
local_irq_save(flags);
if (info->xmit.head != info->xmit.tail
&& info->xmit.buf
&& !(info->IER & UART_IER_THRI)) {
info->IER |= UART_IER_THRI;
custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
custom.intreq = IF_SETCLR | IF_TBE;
mb();
}
local_irq_restore(flags);
}
/*
* ----------------------------------------------------------------------
*
* Here starts the interrupt handling routines. All of the following
* subroutines are declared as inline and are folded into
* rs_interrupt(). They were separated out for readability's sake.
*
* Note: rs_interrupt() is a "fast" interrupt, which means that it
* runs with interrupts turned off. People who may want to modify
* rs_interrupt() should try to keep the interrupt handler as fast as
* possible. After you are done making modifications, it is not a bad
* idea to do:
*
* gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
*
* and look at the resulting assembly code in serial.s.
*
* - Ted Ts'o (tytso@mit.edu), 7-Mar-93
* -----------------------------------------------------------------------
*/
static void receive_chars(struct serial_state *info)
{
int status;
int serdatr;
unsigned char ch, flag;
struct async_icount *icount;
int oe = 0;
icount = &info->icount;
status = UART_LSR_DR; /* We obviously have a character! */
serdatr = custom.serdatr;
mb();
custom.intreq = IF_RBF;
mb();
if((serdatr & 0x1ff) == 0)
status |= UART_LSR_BI;
if(serdatr & SDR_OVRUN)
status |= UART_LSR_OE;
ch = serdatr & 0xff;
icount->rx++;
#ifdef SERIAL_DEBUG_INTR
printk("DR%02x:%02x...", ch, status);
#endif
flag = TTY_NORMAL;
/*
* We don't handle parity or frame errors - but I have left
* the code in, since I'm not sure that the errors can't be
* detected.
*/
if (status & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE)) {
/*
* For statistics only
*/
if (status & UART_LSR_BI) {
status &= ~(UART_LSR_FE | UART_LSR_PE);
icount->brk++;
} else if (status & UART_LSR_PE)
icount->parity++;
else if (status & UART_LSR_FE)
icount->frame++;
if (status & UART_LSR_OE)
icount->overrun++;
/*
* Now check to see if character should be
* ignored, and mask off conditions which
* should be ignored.
*/
if (status & info->ignore_status_mask)
goto out;
status &= info->read_status_mask;
if (status & (UART_LSR_BI)) {
#ifdef SERIAL_DEBUG_INTR
printk("handling break....");
#endif
flag = TTY_BREAK;
if (info->tport.flags & ASYNC_SAK)
do_SAK(info->tport.tty);
} else if (status & UART_LSR_PE)
flag = TTY_PARITY;
else if (status & UART_LSR_FE)
flag = TTY_FRAME;
if (status & UART_LSR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
oe = 1;
}
}
tty_insert_flip_char(&info->tport, ch, flag);
if (oe == 1)
tty_insert_flip_char(&info->tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(&info->tport);
out:
return;
}
static void transmit_chars(struct serial_state *info)
{
custom.intreq = IF_TBE;
mb();
if (info->x_char) {
custom.serdat = info->x_char | 0x100;
mb();
info->icount.tx++;
info->x_char = 0;
return;
}
if (info->xmit.head == info->xmit.tail
|| info->tport.tty->stopped
|| info->tport.tty->hw_stopped) {
info->IER &= ~UART_IER_THRI;
custom.intena = IF_TBE;
mb();
return;
}
custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100;
mb();
info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1);
info->icount.tx++;
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE) < WAKEUP_CHARS)
tty_wakeup(info->tport.tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
#endif
if (info->xmit.head == info->xmit.tail) {
custom.intena = IF_TBE;
mb();
info->IER &= ~UART_IER_THRI;
}
}
static void check_modem_status(struct serial_state *info)
{
struct tty_port *port = &info->tport;
unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
unsigned char dstatus;
struct async_icount *icount;
/* Determine bits that have changed */
dstatus = status ^ current_ctl_bits;
current_ctl_bits = status;
if (dstatus) {
icount = &info->icount;
/* update input line counters */
if (dstatus & SER_DSR)
icount->dsr++;
if (dstatus & SER_DCD) {
icount->dcd++;
}
if (dstatus & SER_CTS)
icount->cts++;
wake_up_interruptible(&port->delta_msr_wait);
}
if ((port->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) {
#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
printk("ttyS%d CD now %s...", info->line,
(!(status & SER_DCD)) ? "on" : "off");
#endif
if (!(status & SER_DCD))
wake_up_interruptible(&port->open_wait);
else {
#ifdef SERIAL_DEBUG_OPEN
printk("doing serial hangup...");
#endif
if (port->tty)
tty_hangup(port->tty);
}
}
if (tty_port_cts_enabled(port)) {
if (port->tty->hw_stopped) {
if (!(status & SER_CTS)) {
#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
printk("CTS tx start...");
#endif
port->tty->hw_stopped = 0;
info->IER |= UART_IER_THRI;
custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
custom.intreq = IF_SETCLR | IF_TBE;
mb();
tty_wakeup(port->tty);
return;
}
} else {
if ((status & SER_CTS)) {
#if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW))
printk("CTS tx stop...");
#endif
port->tty->hw_stopped = 1;
info->IER &= ~UART_IER_THRI;
/* disable Tx interrupt and remove any pending interrupts */
custom.intena = IF_TBE;
mb();
custom.intreq = IF_TBE;
mb();
}
}
}
}
static irqreturn_t ser_vbl_int( int irq, void *data)
{
/* vbl is just a periodic interrupt we tie into to update modem status */
struct serial_state *info = data;
/*
* TBD - is it better to unregister from this interrupt or to
* ignore it if MSI is clear ?
*/
if(info->IER & UART_IER_MSI)
check_modem_status(info);
return IRQ_HANDLED;
}
static irqreturn_t ser_rx_int(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
#ifdef SERIAL_DEBUG_INTR
printk("ser_rx_int...");
#endif
if (!info->tport.tty)
return IRQ_NONE;
receive_chars(info);
#ifdef SERIAL_DEBUG_INTR
printk("end.\n");
#endif
return IRQ_HANDLED;
}
static irqreturn_t ser_tx_int(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
if (custom.serdatr & SDR_TBE) {
#ifdef SERIAL_DEBUG_INTR
printk("ser_tx_int...");
#endif
if (!info->tport.tty)
return IRQ_NONE;
transmit_chars(info);
#ifdef SERIAL_DEBUG_INTR
printk("end.\n");
#endif
}
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the serial interrupt routines.
* -------------------------------------------------------------------
*/
/*
* ---------------------------------------------------------------
* Low level utility subroutines for the serial driver: routines to
* figure out the appropriate timeout for an interrupt chain, routines
* to initialize and startup a serial port, and routines to shutdown a
* serial port. Useful stuff like that.
* ---------------------------------------------------------------
*/
static int startup(struct tty_struct *tty, struct serial_state *info)
{
struct tty_port *port = &info->tport;
unsigned long flags;
int retval=0;
unsigned long page;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
local_irq_save(flags);
if (port->flags & ASYNC_INITIALIZED) {
free_page(page);
goto errout;
}
if (info->xmit.buf)
free_page(page);
else
info->xmit.buf = (unsigned char *) page;
#ifdef SERIAL_DEBUG_OPEN
printk("starting up ttys%d ...", info->line);
#endif
/* Clear anything in the input buffer */
custom.intreq = IF_RBF;
mb();
retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info);
if (retval) {
if (serial_isroot()) {
set_bit(TTY_IO_ERROR, &tty->flags);
retval = 0;
}
goto errout;
}
/* enable both Rx and Tx interrupts */
custom.intena = IF_SETCLR | IF_RBF | IF_TBE;
mb();
info->IER = UART_IER_MSI;
/* remember current state of the DCD and CTS bits */
current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
info->MCR = 0;
if (C_BAUD(tty))
info->MCR = SER_DTR | SER_RTS;
rtsdtr_ctrl(info->MCR);
clear_bit(TTY_IO_ERROR, &tty->flags);
info->xmit.head = info->xmit.tail = 0;
/*
* Set up the tty->alt_speed kludge
*/
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
tty->alt_speed = 57600;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
tty->alt_speed = 115200;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
tty->alt_speed = 230400;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
tty->alt_speed = 460800;
/*
* and set the speed of the serial port
*/
change_speed(tty, info, NULL);
port->flags |= ASYNC_INITIALIZED;
local_irq_restore(flags);
return 0;
errout:
local_irq_restore(flags);
return retval;
}
/*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
*/
static void shutdown(struct tty_struct *tty, struct serial_state *info)
{
unsigned long flags;
struct serial_state *state;
if (!(info->tport.flags & ASYNC_INITIALIZED))
return;
state = info;
#ifdef SERIAL_DEBUG_OPEN
printk("Shutting down serial port %d ....\n", info->line);
#endif
local_irq_save(flags); /* Disable interrupts */
/*
* clear the delta_msr_wait queue to avoid memory leaks: we may free the irq
* here, so the queue might never be woken up
*/
wake_up_interruptible(&info->tport.delta_msr_wait);
/*
* Free the IRQ, if necessary
*/
free_irq(IRQ_AMIGA_VERTB, info);
if (info->xmit.buf) {
free_page((unsigned long) info->xmit.buf);
info->xmit.buf = NULL;
}
info->IER = 0;
custom.intena = IF_RBF | IF_TBE;
mb();
/* disable break condition */
custom.adkcon = AC_UARTBRK;
mb();
if (tty->termios.c_cflag & HUPCL)
info->MCR &= ~(SER_DTR|SER_RTS);
rtsdtr_ctrl(info->MCR);
set_bit(TTY_IO_ERROR, &tty->flags);
info->tport.flags &= ~ASYNC_INITIALIZED;
local_irq_restore(flags);
}
/*
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
static void change_speed(struct tty_struct *tty, struct serial_state *info,
struct ktermios *old_termios)
{
struct tty_port *port = &info->tport;
int quot = 0, baud_base, baud;
unsigned cflag, cval = 0;
int bits;
unsigned long flags;
cflag = tty->termios.c_cflag;
/* Byte size is always 8 bits plus parity bit if requested */
cval = 3; bits = 10;
if (cflag & CSTOPB) {
cval |= 0x04;
bits++;
}
if (cflag & PARENB) {
cval |= UART_LCR_PARITY;
bits++;
}
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
#ifdef CMSPAR
if (cflag & CMSPAR)
cval |= UART_LCR_SPAR;
#endif
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600; /* B0 transition handled in rs_set_termios */
baud_base = info->baud_base;
if (baud == 38400 && (port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
quot = info->custom_divisor;
else {
if (baud == 134)
/* Special case since 134 is really 134.5 */
quot = (2*baud_base / 269);
else if (baud)
quot = baud_base / baud;
}
/* If the quotient is zero refuse the change */
if (!quot && old_termios) {
/* FIXME: Will need updating for new tty in the end */
tty->termios.c_cflag &= ~CBAUD;
tty->termios.c_cflag |= (old_termios->c_cflag & CBAUD);
baud = tty_get_baud_rate(tty);
if (!baud)
baud = 9600;
if (baud == 38400 &&
(port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
quot = info->custom_divisor;
else {
if (baud == 134)
/* Special case since 134 is really 134.5 */
quot = (2*baud_base / 269);
else if (baud)
quot = baud_base / baud;
}
}
/* As a last resort, if the quotient is zero, default to 9600 bps */
if (!quot)
quot = baud_base / 9600;
info->quot = quot;
info->timeout = ((info->xmit_fifo_size*HZ*bits*quot) / baud_base);
info->timeout += HZ/50; /* Add .02 seconds of slop */
/* CTS flow control flag and modem status interrupts */
info->IER &= ~UART_IER_MSI;
if (port->flags & ASYNC_HARDPPS_CD)
info->IER |= UART_IER_MSI;
if (cflag & CRTSCTS) {
port->flags |= ASYNC_CTS_FLOW;
info->IER |= UART_IER_MSI;
} else
port->flags &= ~ASYNC_CTS_FLOW;
if (cflag & CLOCAL)
port->flags &= ~ASYNC_CHECK_CD;
else {
port->flags |= ASYNC_CHECK_CD;
info->IER |= UART_IER_MSI;
}
/* TBD:
* Does clearing IER_MSI imply that we should disable the VBL interrupt ?
*/
/*
* Set up parity check flag
*/
info->read_status_mask = UART_LSR_OE | UART_LSR_DR;
if (I_INPCK(tty))
info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (I_BRKINT(tty) || I_PARMRK(tty))
info->read_status_mask |= UART_LSR_BI;
/*
* Characters to ignore
*/
info->ignore_status_mask = 0;
if (I_IGNPAR(tty))
info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (I_IGNBRK(tty)) {
info->ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators, ignore
* overruns too. (For real raw support).
*/
if (I_IGNPAR(tty))
info->ignore_status_mask |= UART_LSR_OE;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((cflag & CREAD) == 0)
info->ignore_status_mask |= UART_LSR_DR;
local_irq_save(flags);
{
short serper;
/* Set up the baud rate */
serper = quot - 1;
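/* serper + 1 == quot, so the line runs at roughly baud_base / quot bits/s */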
/* Enable or disable parity bit */
if(cval & UART_LCR_PARITY)
serper |= (SERPER_PARENB);
custom.serper = serper;
mb();
}
local_irq_restore(flags);
}
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
struct serial_state *info;
unsigned long flags;
info = tty->driver_data;
if (serial_paranoia_check(info, tty->name, "rs_put_char"))
return 0;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
if (CIRC_SPACE(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE) == 0) {
local_irq_restore(flags);
return 0;
}
info->xmit.buf[info->xmit.head++] = ch;
info->xmit.head &= SERIAL_XMIT_SIZE-1;
local_irq_restore(flags);
return 1;
}
static void rs_flush_chars(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
return;
if (info->xmit.head == info->xmit.tail
|| tty->stopped
|| tty->hw_stopped
|| !info->xmit.buf)
return;
local_irq_save(flags);
info->IER |= UART_IER_THRI;
custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
custom.intreq = IF_SETCLR | IF_TBE;
mb();
local_irq_restore(flags);
}
static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count)
{
int c, ret = 0;
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_write"))
return 0;
if (!info->xmit.buf)
return 0;
local_irq_save(flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
SERIAL_XMIT_SIZE);
if (count < c)
c = count;
if (c <= 0) {
break;
}
memcpy(info->xmit.buf + info->xmit.head, buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
buf += c;
count -= c;
ret += c;
}
local_irq_restore(flags);
if (info->xmit.head != info->xmit.tail
&& !tty->stopped
&& !tty->hw_stopped
&& !(info->IER & UART_IER_THRI)) {
info->IER |= UART_IER_THRI;
local_irq_disable();
custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
custom.intreq = IF_SETCLR | IF_TBE;
mb();
local_irq_restore(flags);
}
return ret;
}
static int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
if (serial_paranoia_check(info, tty->name, "rs_write_room"))
return 0;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
return 0;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
static void rs_flush_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
return;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
tty_wakeup(tty);
}
/*
* This function is used to send a high-priority XON/XOFF character to
* the device
*/
static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_send_char"))
return;
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
/* Check this ! */
local_irq_save(flags);
if(!(custom.intenar & IF_TBE)) {
custom.intena = IF_SETCLR | IF_TBE;
mb();
/* set a pending Tx Interrupt, transmitter should restart now */
custom.intreq = IF_SETCLR | IF_TBE;
mb();
}
local_irq_restore(flags);
info->IER |= UART_IER_THRI;
}
}
/*
* ------------------------------------------------------------
* rs_throttle()
*
* This routine is called by the upper-layer tty layer to signal that
* incoming characters should be throttled.
* ------------------------------------------------------------
*/
static void rs_throttle(struct tty_struct * tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("throttle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
if (serial_paranoia_check(info, tty->name, "rs_throttle"))
return;
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
if (tty->termios.c_cflag & CRTSCTS)
info->MCR &= ~SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
static void rs_unthrottle(struct tty_struct * tty)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("unthrottle %s: %d....\n", tty_name(tty, buf),
tty->ldisc.chars_in_buffer(tty));
#endif
if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
return;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
rs_send_xchar(tty, START_CHAR(tty));
}
if (tty->termios.c_cflag & CRTSCTS)
info->MCR |= SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/*
* ------------------------------------------------------------
* rs_ioctl() and friends
* ------------------------------------------------------------
*/
static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
struct serial_struct __user * retinfo)
{
struct serial_struct tmp;
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tty_lock(tty);
tmp.line = tty->index;
tmp.port = state->port;
tmp.flags = state->tport.flags;
tmp.xmit_fifo_size = state->xmit_fifo_size;
tmp.baud_base = state->baud_base;
tmp.close_delay = state->tport.close_delay;
tmp.closing_wait = state->tport.closing_wait;
tmp.custom_divisor = state->custom_divisor;
tty_unlock(tty);
if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
return -EFAULT;
return 0;
}
static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
struct serial_struct __user * new_info)
{
struct tty_port *port = &state->tport;
struct serial_struct new_serial;
bool change_spd;
int retval = 0;
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
tty_lock(tty);
change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
new_serial.custom_divisor != state->custom_divisor;
if (new_serial.irq || new_serial.port != state->port ||
new_serial.xmit_fifo_size != state->xmit_fifo_size) {
tty_unlock(tty);
return -EINVAL;
}
if (!serial_isroot()) {
if ((new_serial.baud_base != state->baud_base) ||
(new_serial.close_delay != port->close_delay) ||
(new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
(port->flags & ~ASYNC_USR_MASK))) {
tty_unlock(tty);
return -EPERM;
}
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK));
state->custom_divisor = new_serial.custom_divisor;
goto check_and_exit;
}
if (new_serial.baud_base < 9600) {
tty_unlock(tty);
return -EINVAL;
}
/*
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
state->baud_base = new_serial.baud_base;
port->flags = ((port->flags & ~ASYNC_FLAGS) |
(new_serial.flags & ASYNC_FLAGS));
state->custom_divisor = new_serial.custom_divisor;
port->close_delay = new_serial.close_delay * HZ/100;
port->closing_wait = new_serial.closing_wait * HZ/100;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
if (port->flags & ASYNC_INITIALIZED) {
if (change_spd) {
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
tty->alt_speed = 57600;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
tty->alt_speed = 115200;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
tty->alt_speed = 230400;
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
tty->alt_speed = 460800;
change_speed(tty, state, NULL);
}
} else
retval = startup(tty, state);
tty_unlock(tty);
return retval;
}
/*
* get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not be done when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
static int get_lsr_info(struct serial_state *info, unsigned int __user *value)
{
unsigned char status;
unsigned int result;
unsigned long flags;
local_irq_save(flags);
status = custom.serdatr;
mb();
local_irq_restore(flags);
result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0);
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
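/*
 * For illustration only: a user-space RS485-style application could poll
 * this via the TIOCSERGETLSR ioctl and test TIOCSER_TEMT, e.g.
 *
 *	unsigned int lsr;
 *	if (ioctl(fd, TIOCSERGETLSR, &lsr) == 0 && (lsr & TIOCSER_TEMT))
 *		release_bus();	release_bus() is a hypothetical helper
 */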
static int rs_tiocmget(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
unsigned char control, status;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
control = info->MCR;
local_irq_save(flags);
status = ciab.pra;
local_irq_restore(flags);
return ((control & SER_RTS) ? TIOCM_RTS : 0)
| ((control & SER_DTR) ? TIOCM_DTR : 0)
| (!(status & SER_DCD) ? TIOCM_CAR : 0)
| (!(status & SER_DSR) ? TIOCM_DSR : 0)
| (!(status & SER_CTS) ? TIOCM_CTS : 0);
}
static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
local_irq_save(flags);
if (set & TIOCM_RTS)
info->MCR |= SER_RTS;
if (set & TIOCM_DTR)
info->MCR |= SER_DTR;
if (clear & TIOCM_RTS)
info->MCR &= ~SER_RTS;
if (clear & TIOCM_DTR)
info->MCR &= ~SER_DTR;
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
return 0;
}
/*
* rs_break() --- routine which turns the break handling on or off
*/
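/*
 * Per the usual tty break_ctl convention, break_state == -1 turns the
 * break on (AC_SETCLR | AC_UARTBRK below) and anything else clears it.
 */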
static int rs_break(struct tty_struct *tty, int break_state)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_break"))
return -EINVAL;
local_irq_save(flags);
if (break_state == -1)
custom.adkcon = AC_SETCLR | AC_UARTBRK;
else
custom.adkcon = AC_UARTBRK;
mb();
local_irq_restore(flags);
return 0;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: write counters to the user passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int rs_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct serial_state *info = tty->driver_data;
struct async_icount cnow;
unsigned long flags;
local_irq_save(flags);
cnow = info->icount;
local_irq_restore(flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
static int rs_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct serial_state *info = tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
void __user *argp = (void __user *)arg;
unsigned long flags;
DEFINE_WAIT(wait);
int ret;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
}
switch (cmd) {
case TIOCGSERIAL:
return get_serial_info(tty, info, argp);
case TIOCSSERIAL:
return set_serial_info(tty, info, argp);
case TIOCSERCONFIG:
return 0;
case TIOCSERGETLSR: /* Get line status register */
return get_lsr_info(info, argp);
case TIOCSERGSTRUCT:
if (copy_to_user(argp,
info, sizeof(struct serial_state)))
return -EFAULT;
return 0;
/*
* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
* - mask passed in arg for lines of interest
* (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
* Caller should use TIOCGICOUNT to see which one it was
*/
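/*
 * Sketch of the expected user-space usage (fd refers to this port;
 * the names are purely illustrative):
 *
 *	struct serial_icounter_struct ic;
 *	ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS);	wait for a change
 *	ioctl(fd, TIOCGICOUNT, &ic);			then read the counters
 */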
case TIOCMIWAIT:
local_irq_save(flags);
/* note the counters on entry */
cprev = info->icount;
local_irq_restore(flags);
while (1) {
prepare_to_wait(&info->tport.delta_msr_wait,
&wait, TASK_INTERRUPTIBLE);
local_irq_save(flags);
cnow = info->icount; /* atomic copy */
local_irq_restore(flags);
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
ret = -EIO; /* no change => error */
break;
}
if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
ret = 0;
break;
}
schedule();
/* see if a signal did it */
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
cprev = cnow;
}
finish_wait(&info->tport.delta_msr_wait, &wait);
return ret;
case TIOCSERGWILD:
case TIOCSERSWILD:
/* "setserial -W" is called in Debian boot */
printk ("TIOCSER?WILD ioctl obsolete, ignored.\n");
return 0;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
change_speed(tty, info, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) &&
!(cflag & CBAUD)) {
info->MCR &= ~(SER_DTR|SER_RTS);
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
(cflag & CBAUD)) {
info->MCR |= SER_DTR;
if (!(tty->termios.c_cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags)) {
info->MCR |= SER_RTS;
}
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/* Handle turning off CRTSCTS */
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios.c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
rs_start(tty);
}
#if 0
/*
* No need to wake up processes in open wait, since they
* sample the CLOCAL flag once, and don't recheck it.
* XXX It's not clear whether the current behavior is correct
* or not. Hence, this may change.....
*/
if (!(old_termios->c_cflag & CLOCAL) &&
(tty->termios.c_cflag & CLOCAL))
wake_up_interruptible(&info->open_wait);
#endif
}
/*
* ------------------------------------------------------------
* rs_close()
*
* This routine is called when the serial port gets closed. First, we
* wait for the last remaining data to be sent. Then, we unlink its
* async structure from the interrupt chain if necessary, and we free
* that IRQ if nothing is left in the chain.
* ------------------------------------------------------------
*/
static void rs_close(struct tty_struct *tty, struct file * filp)
{
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
if (serial_paranoia_check(state, tty->name, "rs_close"))
return;
if (tty_port_close_start(port, tty, filp) == 0)
return;
/*
* At this point we stop accepting input. To do this, we
* disable the receive line status interrupts, and tell the
* interrupt driver to stop checking the data ready bit in the
* line status register.
*/
state->read_status_mask &= ~UART_LSR_DR;
if (port->flags & ASYNC_INITIALIZED) {
/* disable receive interrupts */
custom.intena = IF_RBF;
mb();
/* clear any pending receive interrupt */
custom.intreq = IF_RBF;
mb();
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
rs_wait_until_sent(tty, state->timeout);
}
shutdown(tty, state);
rs_flush_buffer(tty);
tty_ldisc_flush(tty);
port->tty = NULL;
tty_port_close_end(port, tty);
}
/*
* rs_wait_until_sent() --- wait until the transmitter is empty
*/
static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct serial_state *info = tty->driver_data;
unsigned long orig_jiffies, char_time;
int lsr;
if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
return;
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
orig_jiffies = jiffies;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
* interval should also be less than the timeout.
*
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
char_time = (info->timeout - HZ/50) / info->xmit_fifo_size;
char_time = char_time / 5;
if (char_time == 0)
char_time = 1;
if (timeout)
char_time = min_t(unsigned long, char_time, timeout);
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
* ever clear. This assumes the UART isn't doing flow
* control, which is currently the case. Hence, if it ever
* takes longer than info->timeout, this is probably due to a
* UART bug of some kind. So, we clamp the timeout parameter at
* 2*info->timeout.
*/
if (!timeout || timeout > 2*info->timeout)
timeout = 2*info->timeout;
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time);
printk("jiff=%lu...", jiffies);
#endif
while(!((lsr = custom.serdatr) & SDR_TSRE)) {
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("serdatr = %d (jiff=%lu)...", lsr, jiffies);
#endif
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
__set_current_state(TASK_RUNNING);
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
}
/*
* rs_hangup() --- called by tty_hangup() when a hangup is signaled.
*/
static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
if (serial_paranoia_check(info, tty->name, "rs_hangup"))
return;
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
info->tport.flags &= ~ASYNC_NORMAL_ACTIVE;
info->tport.tty = NULL;
wake_up_interruptible(&info->tport.open_wait);
}
/*
* This routine is called whenever a serial port is opened. It
* enables interrupts for a serial port, linking in its async structure into
* the IRQ chain. It also performs the serial-specific
* initialization for the tty structure.
*/
static int rs_open(struct tty_struct *tty, struct file * filp)
{
struct serial_state *info = rs_table + tty->index;
struct tty_port *port = &info->tport;
int retval;
port->count++;
port->tty = tty;
tty->driver_data = info;
tty->port = port;
if (serial_paranoia_check(info, tty->name, "rs_open"))
return -ENODEV;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
retval = startup(tty, info);
if (retval) {
return retval;
}
return tty_port_block_til_ready(port, tty, filp);
}
/*
* /proc fs routines....
*/
static inline void line_info(struct seq_file *m, int line,
struct serial_state *state)
{
char stat_buf[30], control, status;
unsigned long flags;
seq_printf(m, "%d: uart:amiga_builtin", line);
local_irq_save(flags);
status = ciab.pra;
control = (state->tport.flags & ASYNC_INITIALIZED) ? state->MCR : status;
local_irq_restore(flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
if(!(control & SER_RTS))
strcat(stat_buf, "|RTS");
if(!(status & SER_CTS))
strcat(stat_buf, "|CTS");
if(!(control & SER_DTR))
strcat(stat_buf, "|DTR");
if(!(status & SER_DSR))
strcat(stat_buf, "|DSR");
if(!(status & SER_DCD))
strcat(stat_buf, "|CD");
if (state->quot)
seq_printf(m, " baud:%d", state->baud_base / state->quot);
seq_printf(m, " tx:%d rx:%d", state->icount.tx, state->icount.rx);
if (state->icount.frame)
seq_printf(m, " fe:%d", state->icount.frame);
if (state->icount.parity)
seq_printf(m, " pe:%d", state->icount.parity);
if (state->icount.brk)
seq_printf(m, " brk:%d", state->icount.brk);
if (state->icount.overrun)
seq_printf(m, " oe:%d", state->icount.overrun);
/*
* Last thing is the RS-232 status lines
*/
seq_printf(m, " %s\n", stat_buf+1);
}
static int rs_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
line_info(m, 0, &rs_table[0]);
return 0;
}
static int rs_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rs_proc_show, NULL);
}
static const struct file_operations rs_proc_fops = {
.owner = THIS_MODULE,
.open = rs_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* ---------------------------------------------------------------------
* rs_init() and friends
*
* rs_init() is called at boot-time to initialize the serial driver.
* ---------------------------------------------------------------------
*/
/*
* This routine prints out the appropriate serial driver version
* number, and identifies which options were configured into this
* driver.
*/
static void show_serial_version(void)
{
printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
}
static const struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
.put_char = rs_put_char,
.flush_chars = rs_flush_chars,
.write_room = rs_write_room,
.chars_in_buffer = rs_chars_in_buffer,
.flush_buffer = rs_flush_buffer,
.ioctl = rs_ioctl,
.throttle = rs_throttle,
.unthrottle = rs_unthrottle,
.set_termios = rs_set_termios,
.stop = rs_stop,
.start = rs_start,
.hangup = rs_hangup,
.break_ctl = rs_break,
.send_xchar = rs_send_xchar,
.wait_until_sent = rs_wait_until_sent,
.tiocmget = rs_tiocmget,
.tiocmset = rs_tiocmset,
.get_icount = rs_get_icount,
.proc_fops = &rs_proc_fops,
};
static int amiga_carrier_raised(struct tty_port *port)
{
return !(ciab.pra & SER_DCD);
}
static void amiga_dtr_rts(struct tty_port *port, int raise)
{
struct serial_state *info = container_of(port, struct serial_state,
tport);
unsigned long flags;
if (raise)
info->MCR |= SER_DTR|SER_RTS;
else
info->MCR &= ~(SER_DTR|SER_RTS);
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
static const struct tty_port_operations amiga_port_ops = {
.carrier_raised = amiga_carrier_raised,
.dtr_rts = amiga_dtr_rts,
};
/*
* The serial driver boot-time initialization code!
*/
static int __init amiga_serial_probe(struct platform_device *pdev)
{
unsigned long flags;
struct serial_state * state;
int error;
serial_driver = alloc_tty_driver(NR_PORTS);
if (!serial_driver)
return -ENOMEM;
show_serial_version();
/* Initialize the tty_driver structure */
serial_driver->driver_name = "amiserial";
serial_driver->name = "ttyS";
serial_driver->major = TTY_MAJOR;
serial_driver->minor_start = 64;
serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
serial_driver->subtype = SERIAL_TYPE_NORMAL;
serial_driver->init_termios = tty_std_termios;
serial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
serial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(serial_driver, &serial_ops);
state = rs_table;
state->port = (int)&custom.serdatr; /* Just to give it a value */
state->custom_divisor = 0;
state->icount.cts = state->icount.dsr =
state->icount.rng = state->icount.dcd = 0;
state->icount.rx = state->icount.tx = 0;
state->icount.frame = state->icount.parity = 0;
state->icount.overrun = state->icount.brk = 0;
tty_port_init(&state->tport);
state->tport.ops = &amiga_port_ops;
tty_port_link_device(&state->tport, serial_driver, 0);
error = tty_register_driver(serial_driver);
if (error)
goto fail_put_tty_driver;
printk(KERN_INFO "ttyS0 is the amiga builtin serial port\n");
/* Hardware set up */
state->baud_base = amiga_colorclock;
state->xmit_fifo_size = 1;
/* set ISRs, and then disable the rx interrupts */
error = request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state);
if (error)
goto fail_unregister;
error = request_irq(IRQ_AMIGA_RBF, ser_rx_int, 0,
"serial RX", state);
if (error)
goto fail_free_irq;
local_irq_save(flags);
/* turn off Rx and Tx interrupts */
custom.intena = IF_RBF | IF_TBE;
mb();
/* clear any pending interrupt */
custom.intreq = IF_RBF | IF_TBE;
mb();
local_irq_restore(flags);
/*
* set the appropriate directions for the modem control flags,
* and clear RTS and DTR
*/
ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
platform_set_drvdata(pdev, state);
return 0;
fail_free_irq:
free_irq(IRQ_AMIGA_TBE, state);
fail_unregister:
tty_unregister_driver(serial_driver);
fail_put_tty_driver:
tty_port_destroy(&state->tport);
put_tty_driver(serial_driver);
return error;
}
static int __exit amiga_serial_remove(struct platform_device *pdev)
{
int error;
struct serial_state *state = platform_get_drvdata(pdev);
/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
if ((error = tty_unregister_driver(serial_driver)))
printk("SERIAL: failed to unregister serial driver (%d)\n",
error);
put_tty_driver(serial_driver);
tty_port_destroy(&state->tport);
free_irq(IRQ_AMIGA_TBE, state);
free_irq(IRQ_AMIGA_RBF, state);
return error;
}
static struct platform_driver amiga_serial_driver = {
.remove = __exit_p(amiga_serial_remove),
.driver = {
.name = "amiga-serial",
.owner = THIS_MODULE,
},
};
module_platform_driver_probe(amiga_serial_driver, amiga_serial_probe);
#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
/*
* ------------------------------------------------------------
* Serial console driver
* ------------------------------------------------------------
*/
static void amiga_serial_putc(char c)
{
custom.serdat = (unsigned char)c | 0x100;
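/* busy-wait until the transmit buffer is empty again (0x2000 is SDR_TBE) */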
while (!(custom.serdatr & 0x2000))
barrier();
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console must be locked when we get here.
*/
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
unsigned short intena = custom.intenar;
custom.intena = IF_TBE;
while (count--) {
if (*s == '\n')
amiga_serial_putc('\r');
amiga_serial_putc(*s++);
}
custom.intena = IF_SETCLR | (intena & IF_TBE);
}
static struct tty_driver *serial_console_device(struct console *c, int *index)
{
*index = 0;
return serial_driver;
}
static struct console sercons = {
.name = "ttyS",
.write = serial_console_write,
.device = serial_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/*
* Register console.
*/
static int __init amiserial_console_init(void)
{
if (!MACH_IS_AMIGA)
return -ENODEV;
register_console(&sercons);
return 0;
}
console_initcall(amiserial_console_init);
#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:amiga-serial");
| gpl-2.0 |
Entropy512/android_kernel_samsung_dempsey | arch/arm/mach-u300/core.c | 767 | 49412 | /*
*
* arch/arm/mach-u300/core.c
*
*
* Copyright (C) 2007-2010 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* Core platform support, IRQ handling and device definitions.
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/termios.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <mach/coh901318.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware/vic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/syscon.h>
#include <mach/dma_channels.h>
#include "clock.h"
#include "mmc.h"
#include "spi.h"
#include "i2c.h"
/*
* Static I/O mappings that are needed for booting the U300 platforms. The
* only things we need are the areas where we find the timer, syscon and
* intcon, since the remaining device drivers will map their own memory
* physical to virtual as the need arises.
*/
static struct map_desc u300_io_desc[] __initdata = {
{
.virtual = U300_SLOW_PER_VIRT_BASE,
.pfn = __phys_to_pfn(U300_SLOW_PER_PHYS_BASE),
.length = SZ_64K,
.type = MT_DEVICE,
},
{
.virtual = U300_AHB_PER_VIRT_BASE,
.pfn = __phys_to_pfn(U300_AHB_PER_PHYS_BASE),
.length = SZ_32K,
.type = MT_DEVICE,
},
{
.virtual = U300_FAST_PER_VIRT_BASE,
.pfn = __phys_to_pfn(U300_FAST_PER_PHYS_BASE),
.length = SZ_32K,
.type = MT_DEVICE,
},
{
.virtual = 0xffff2000, /* TCM memory */
.pfn = __phys_to_pfn(0xffff2000),
.length = SZ_16K,
.type = MT_DEVICE,
},
/*
* This overlaps with the IRQ vectors etc at 0xffff0000, so these
* may have to be moved to 0x00000000 in order to use the ROM.
*/
/*
{
.virtual = U300_BOOTROM_VIRT_BASE,
.pfn = __phys_to_pfn(U300_BOOTROM_PHYS_BASE),
.length = SZ_64K,
.type = MT_ROM,
},
*/
};
void __init u300_map_io(void)
{
iotable_init(u300_io_desc, ARRAY_SIZE(u300_io_desc));
}
/*
* Declaration of devices found on the U300 board and
* their respective memory locations.
*/
static struct amba_device uart0_device = {
.dev = {
.init_name = "uart0", /* Slow device at 0x3000 offset */
.platform_data = NULL,
},
.res = {
.start = U300_UART0_BASE,
.end = U300_UART0_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_U300_UART0, NO_IRQ },
};
/* The U335 have an additional UART1 on the APP CPU */
#ifdef CONFIG_MACH_U300_BS335
static struct amba_device uart1_device = {
.dev = {
.init_name = "uart1", /* Fast device at 0x7000 offset */
.platform_data = NULL,
},
.res = {
.start = U300_UART1_BASE,
.end = U300_UART1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_U300_UART1, NO_IRQ },
};
#endif
static struct amba_device pl172_device = {
.dev = {
.init_name = "pl172", /* AHB device at 0x4000 offset */
.platform_data = NULL,
},
.res = {
.start = U300_EMIF_CFG_BASE,
.end = U300_EMIF_CFG_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
/*
* Everything within this next ifdef deals with external devices connected to
* the APP SPI bus.
*/
static struct amba_device pl022_device = {
.dev = {
.coherent_dma_mask = ~0,
.init_name = "pl022", /* Fast device at 0x6000 offset */
},
.res = {
.start = U300_SPI_BASE,
.end = U300_SPI_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = {IRQ_U300_SPI, NO_IRQ },
/*
* This device has a DMA channel but the Linux driver does not use
* it currently.
*/
};
static struct amba_device mmcsd_device = {
.dev = {
.init_name = "mmci", /* Fast device at 0x1000 offset */
.platform_data = NULL, /* Added later */
},
.res = {
.start = U300_MMCSD_BASE,
.end = U300_MMCSD_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
.irq = {IRQ_U300_MMCSD_MCIINTR0, IRQ_U300_MMCSD_MCIINTR1 },
/*
* This device has a DMA channel but the Linux driver does not use
* it currently.
*/
};
/*
* The order of device declaration may be important, since some devices
* have dependencies on other devices being initialized first.
*/
static struct amba_device *amba_devs[] __initdata = {
&uart0_device,
#ifdef CONFIG_MACH_U300_BS335
&uart1_device,
#endif
&pl022_device,
&pl172_device,
&mmcsd_device,
};
/* Here follows a list of all hw resources that the platform devices
* allocate. Note, clock dependencies are not included
*/
static struct resource gpio_resources[] = {
{
.start = U300_GPIO_BASE,
.end = (U300_GPIO_BASE + SZ_4K - 1),
.flags = IORESOURCE_MEM,
},
{
.name = "gpio0",
.start = IRQ_U300_GPIO_PORT0,
.end = IRQ_U300_GPIO_PORT0,
.flags = IORESOURCE_IRQ,
},
{
.name = "gpio1",
.start = IRQ_U300_GPIO_PORT1,
.end = IRQ_U300_GPIO_PORT1,
.flags = IORESOURCE_IRQ,
},
{
.name = "gpio2",
.start = IRQ_U300_GPIO_PORT2,
.end = IRQ_U300_GPIO_PORT2,
.flags = IORESOURCE_IRQ,
},
#ifdef U300_COH901571_3
{
.name = "gpio3",
.start = IRQ_U300_GPIO_PORT3,
.end = IRQ_U300_GPIO_PORT3,
.flags = IORESOURCE_IRQ,
},
{
.name = "gpio4",
.start = IRQ_U300_GPIO_PORT4,
.end = IRQ_U300_GPIO_PORT4,
.flags = IORESOURCE_IRQ,
},
#ifdef CONFIG_MACH_U300_BS335
{
.name = "gpio5",
.start = IRQ_U300_GPIO_PORT5,
.end = IRQ_U300_GPIO_PORT5,
.flags = IORESOURCE_IRQ,
},
{
.name = "gpio6",
.start = IRQ_U300_GPIO_PORT6,
.end = IRQ_U300_GPIO_PORT6,
.flags = IORESOURCE_IRQ,
},
#endif /* CONFIG_MACH_U300_BS335 */
#endif /* U300_COH901571_3 */
};
static struct resource keypad_resources[] = {
{
.start = U300_KEYPAD_BASE,
.end = U300_KEYPAD_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "coh901461-press",
.start = IRQ_U300_KEYPAD_KEYBF,
.end = IRQ_U300_KEYPAD_KEYBF,
.flags = IORESOURCE_IRQ,
},
{
.name = "coh901461-release",
.start = IRQ_U300_KEYPAD_KEYBR,
.end = IRQ_U300_KEYPAD_KEYBR,
.flags = IORESOURCE_IRQ,
},
};
static struct resource rtc_resources[] = {
{
.start = U300_RTC_BASE,
.end = U300_RTC_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_U300_RTC,
.end = IRQ_U300_RTC,
.flags = IORESOURCE_IRQ,
},
};
/*
* The FSMC does have IRQs: #43 and #44 (NFIF and NFIF2),
* but these are not yet used by the driver.
*/
static struct resource fsmc_resources[] = {
{
.start = U300_NAND_IF_PHYS_BASE,
.end = U300_NAND_IF_PHYS_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct resource i2c0_resources[] = {
{
.start = U300_I2C0_BASE,
.end = U300_I2C0_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_U300_I2C0,
.end = IRQ_U300_I2C0,
.flags = IORESOURCE_IRQ,
},
};
static struct resource i2c1_resources[] = {
{
.start = U300_I2C1_BASE,
.end = U300_I2C1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_U300_I2C1,
.end = IRQ_U300_I2C1,
.flags = IORESOURCE_IRQ,
},
};
static struct resource wdog_resources[] = {
{
.start = U300_WDOG_BASE,
.end = U300_WDOG_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_U300_WDOG,
.end = IRQ_U300_WDOG,
.flags = IORESOURCE_IRQ,
}
};
/* TODO: These should be protected by suitable #ifdef's */
static struct resource ave_resources[] = {
{
.name = "AVE3e I/O Area",
.start = U300_VIDEOENC_BASE,
.end = U300_VIDEOENC_BASE + SZ_512K - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "AVE3e IRQ0",
.start = IRQ_U300_VIDEO_ENC_0,
.end = IRQ_U300_VIDEO_ENC_0,
.flags = IORESOURCE_IRQ,
},
{
.name = "AVE3e IRQ1",
.start = IRQ_U300_VIDEO_ENC_1,
.end = IRQ_U300_VIDEO_ENC_1,
.flags = IORESOURCE_IRQ,
},
{
.name = "AVE3e Physmem Area",
.start = 0, /* 0 will be remapped to reserved memory */
.end = SZ_1M - 1,
.flags = IORESOURCE_MEM,
},
/*
* The AVE3e requires two regions of 256MB that it considers
* "invisible". The hardware will not be able to access these
* addresses, so they should never point to system RAM.
*/
{
.name = "AVE3e Reserved 0",
.start = 0xd0000000,
.end = 0xd0000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "AVE3e Reserved 1",
.start = 0xe0000000,
.end = 0xe0000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct resource dma_resource[] = {
{
.start = U300_DMAC_BASE,
.end = U300_DMAC_BASE + PAGE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_U300_DMA,
.end = IRQ_U300_DMA,
.flags = IORESOURCE_IRQ,
}
};
#ifdef CONFIG_MACH_U300_BS335
/* Lists all DMA slave channels.
* Syntax is [A1, B1, A2, B2, ..., -1, -1]:
* each pair selects all channels from A to B; the end of the list is
* marked with -1, -1.
*/
static int dma_slave_channels[] = {
U300_DMA_MSL_TX_0, U300_DMA_SPI_RX,
U300_DMA_UART1_TX, U300_DMA_UART1_RX, -1, -1};
/* Lists all DMA memcpy channels. */
static int dma_memcpy_channels[] = {
U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_8, -1, -1};
#else /* CONFIG_MACH_U300_BS335 */
static int dma_slave_channels[] = {U300_DMA_MSL_TX_0, U300_DMA_SPI_RX, -1, -1};
static int dma_memcpy_channels[] = {
U300_DMA_GENERAL_PURPOSE_0, U300_DMA_GENERAL_PURPOSE_10, -1, -1};
#endif
/**
* coh901318_access_memory_state() - register DMA intent to access memory
* @dev: the DMA device
* @active: true means the DMA controller intends to access memory,
* false means it won't access memory
*/
static void coh901318_access_memory_state(struct device *dev, bool active)
{
}
#define flags_memcpy_config (COH901318_CX_CFG_CH_DISABLE | \
COH901318_CX_CFG_RM_MEMORY_TO_MEMORY | \
COH901318_CX_CFG_LCR_DISABLE | \
COH901318_CX_CFG_TC_IRQ_ENABLE | \
COH901318_CX_CFG_BE_IRQ_ENABLE)
#define flags_memcpy_lli_chained (COH901318_CX_CTRL_TC_ENABLE | \
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_MASTER_MODE_M1RW | \
COH901318_CX_CTRL_TCP_DISABLE | \
COH901318_CX_CTRL_TC_IRQ_DISABLE | \
COH901318_CX_CTRL_HSP_DISABLE | \
COH901318_CX_CTRL_HSS_DISABLE | \
COH901318_CX_CTRL_DDMA_LEGACY | \
COH901318_CX_CTRL_PRDD_SOURCE)
#define flags_memcpy_lli (COH901318_CX_CTRL_TC_ENABLE | \
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_MASTER_MODE_M1RW | \
COH901318_CX_CTRL_TCP_DISABLE | \
COH901318_CX_CTRL_TC_IRQ_DISABLE | \
COH901318_CX_CTRL_HSP_DISABLE | \
COH901318_CX_CTRL_HSS_DISABLE | \
COH901318_CX_CTRL_DDMA_LEGACY | \
COH901318_CX_CTRL_PRDD_SOURCE)
#define flags_memcpy_lli_last (COH901318_CX_CTRL_TC_ENABLE | \
COH901318_CX_CTRL_BURST_COUNT_32_BYTES | \
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS | \
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE | \
COH901318_CX_CTRL_MASTER_MODE_M1RW | \
COH901318_CX_CTRL_TCP_DISABLE | \
COH901318_CX_CTRL_TC_IRQ_ENABLE | \
COH901318_CX_CTRL_HSP_DISABLE | \
COH901318_CX_CTRL_HSS_DISABLE | \
COH901318_CX_CTRL_DDMA_LEGACY | \
COH901318_CX_CTRL_PRDD_SOURCE)
const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
{
.number = U300_DMA_MSL_TX_0,
.name = "MSL TX 0",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x20,
},
{
.number = U300_DMA_MSL_TX_1,
.name = "MSL TX 1",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x20,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
},
{
.number = U300_DMA_MSL_TX_2,
.name = "MSL TX 2",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 2 * 0x40 + 0x20,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.desc_nbr_max = 10,
},
{
.number = U300_DMA_MSL_TX_3,
.name = "MSL TX 3",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x20,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
},
{
.number = U300_DMA_MSL_TX_4,
.name = "MSL TX 4",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x20,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1R_M2W |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
},
{
.number = U300_DMA_MSL_TX_5,
.name = "MSL TX 5",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x20,
},
{
.number = U300_DMA_MSL_TX_6,
.name = "MSL TX 6",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x20,
},
{
.number = U300_DMA_MSL_RX_0,
.name = "MSL RX 0",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 0 * 0x40 + 0x220,
},
{
.number = U300_DMA_MSL_RX_1,
.name = "MSL RX 1",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 1 * 0x40 + 0x220,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_MSL_RX_2,
.name = "MSL RX 2",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 2 * 0x40 + 0x220,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_MSL_RX_3,
.name = "MSL RX 3",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 3 * 0x40 + 0x220,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_MSL_RX_4,
.name = "MSL RX 4",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 4 * 0x40 + 0x220,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_MSL_RX_5,
.name = "MSL RX 5",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 5 * 0x40 + 0x220,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M2R_M1W |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_DEMAND_DMA1 |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_MSL_RX_6,
.name = "MSL RX 6",
.priority_high = 0,
.dev_addr = U300_MSL_BASE + 6 * 0x40 + 0x220,
},
{
.number = U300_DMA_MMCSD_RX_TX,
.name = "MMCSD RX TX",
.priority_high = 0,
.dev_addr = U300_MMCSD_BASE + 0x080,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_32_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY,
},
{
.number = U300_DMA_MSPRO_TX,
.name = "MSPRO TX",
.priority_high = 0,
},
{
.number = U300_DMA_MSPRO_RX,
.name = "MSPRO RX",
.priority_high = 0,
},
{
.number = U300_DMA_UART0_TX,
.name = "UART0 TX",
.priority_high = 0,
},
{
.number = U300_DMA_UART0_RX,
.name = "UART0 RX",
.priority_high = 0,
},
{
.number = U300_DMA_APEX_TX,
.name = "APEX TX",
.priority_high = 0,
},
{
.number = U300_DMA_APEX_RX,
.name = "APEX RX",
.priority_high = 0,
},
{
.number = U300_DMA_PCM_I2S0_TX,
.name = "PCM I2S0 TX",
.priority_high = 1,
.dev_addr = U300_PCM_I2S0_BASE + 0x14,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
},
{
.number = U300_DMA_PCM_I2S0_RX,
.name = "PCM I2S0 RX",
.priority_high = 1,
.dev_addr = U300_PCM_I2S0_BASE + 0x10,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_PCM_I2S1_TX,
.name = "PCM I2S1 TX",
.priority_high = 1,
.dev_addr = U300_PCM_I2S1_BASE + 0x14,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_DISABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_SOURCE,
},
{
.number = U300_DMA_PCM_I2S1_RX,
.name = "PCM I2S1 RX",
.priority_high = 1,
.dev_addr = U300_PCM_I2S1_BASE + 0x10,
.param.config = COH901318_CX_CFG_CH_DISABLE |
COH901318_CX_CFG_LCR_DISABLE |
COH901318_CX_CFG_TC_IRQ_ENABLE |
COH901318_CX_CFG_BE_IRQ_ENABLE,
.param.ctrl_lli_chained = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_DISABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_DISABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
.param.ctrl_lli_last = 0 |
COH901318_CX_CTRL_TC_ENABLE |
COH901318_CX_CTRL_BURST_COUNT_16_BYTES |
COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE |
COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE |
COH901318_CX_CTRL_MASTER_MODE_M1RW |
COH901318_CX_CTRL_TCP_ENABLE |
COH901318_CX_CTRL_TC_IRQ_ENABLE |
COH901318_CX_CTRL_HSP_ENABLE |
COH901318_CX_CTRL_HSS_DISABLE |
COH901318_CX_CTRL_DDMA_LEGACY |
COH901318_CX_CTRL_PRDD_DEST,
},
{
.number = U300_DMA_XGAM_CDI,
.name = "XGAM CDI",
.priority_high = 0,
},
{
.number = U300_DMA_XGAM_PDI,
.name = "XGAM PDI",
.priority_high = 0,
},
{
.number = U300_DMA_SPI_TX,
.name = "SPI TX",
.priority_high = 0,
},
{
.number = U300_DMA_SPI_RX,
.name = "SPI RX",
.priority_high = 0,
},
{
.number = U300_DMA_GENERAL_PURPOSE_0,
.name = "GENERAL 00",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_1,
.name = "GENERAL 01",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_2,
.name = "GENERAL 02",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_3,
.name = "GENERAL 03",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_4,
.name = "GENERAL 04",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_5,
.name = "GENERAL 05",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_6,
.name = "GENERAL 06",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_7,
.name = "GENERAL 07",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_8,
.name = "GENERAL 08",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
#ifdef CONFIG_MACH_U300_BS335
{
.number = U300_DMA_UART1_TX,
.name = "UART1 TX",
.priority_high = 0,
},
{
.number = U300_DMA_UART1_RX,
.name = "UART1 RX",
.priority_high = 0,
}
#else
{
.number = U300_DMA_GENERAL_PURPOSE_9,
.name = "GENERAL 09",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
},
{
.number = U300_DMA_GENERAL_PURPOSE_10,
.name = "GENERAL 10",
.priority_high = 0,
.param.config = flags_memcpy_config,
.param.ctrl_lli_chained = flags_memcpy_lli_chained,
.param.ctrl_lli = flags_memcpy_lli,
.param.ctrl_lli_last = flags_memcpy_lli_last,
}
#endif
};
static struct coh901318_platform coh901318_platform = {
.chans_slave = dma_slave_channels,
.chans_memcpy = dma_memcpy_channels,
.access_memory_state = coh901318_access_memory_state,
.chan_conf = chan_config,
.max_channels = U300_DMA_CHANNELS,
};
static struct platform_device wdog_device = {
.name = "coh901327_wdog",
.id = -1,
.num_resources = ARRAY_SIZE(wdog_resources),
.resource = wdog_resources,
};
static struct platform_device i2c0_device = {
.name = "stu300",
.id = 0,
.num_resources = ARRAY_SIZE(i2c0_resources),
.resource = i2c0_resources,
};
static struct platform_device i2c1_device = {
.name = "stu300",
.id = 1,
.num_resources = ARRAY_SIZE(i2c1_resources),
.resource = i2c1_resources,
};
static struct platform_device gpio_device = {
.name = "u300-gpio",
.id = -1,
.num_resources = ARRAY_SIZE(gpio_resources),
.resource = gpio_resources,
};
static struct platform_device keypad_device = {
.name = "keypad",
.id = -1,
.num_resources = ARRAY_SIZE(keypad_resources),
.resource = keypad_resources,
};
static struct platform_device rtc_device = {
.name = "rtc-coh901331",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct platform_device fsmc_device = {
.name = "nandif",
.id = -1,
.num_resources = ARRAY_SIZE(fsmc_resources),
.resource = fsmc_resources,
};
static struct platform_device ave_device = {
.name = "video_enc",
.id = -1,
.num_resources = ARRAY_SIZE(ave_resources),
.resource = ave_resources,
};
static struct platform_device dma_device = {
.name = "coh901318",
.id = -1,
.resource = dma_resource,
.num_resources = ARRAY_SIZE(dma_resource),
.dev = {
.platform_data = &coh901318_platform,
.coherent_dma_mask = ~0,
},
};
/*
* Notice that AMBA devices are initialized before platform devices.
*/
static struct platform_device *platform_devs[] __initdata = {
&dma_device,
&i2c0_device,
&i2c1_device,
&keypad_device,
&rtc_device,
&gpio_device,
&fsmc_device,
&wdog_device,
&ave_device
};
/*
* Interrupts: the U300 platforms have two pl190 ARM PrimeCells connected
* together so some interrupts are connected to the first one and some
* to the second one.
*/
void __init u300_init_irq(void)
{
u32 mask[2] = {0, 0};
int i;
for (i = 0; i < NR_IRQS; i++)
set_bit(i, (unsigned long *) &mask[0]);
u300_enable_intcon_clock();
vic_init((void __iomem *) U300_INTCON0_VBASE, 0, mask[0], mask[0]);
vic_init((void __iomem *) U300_INTCON1_VBASE, 32, mask[1], mask[1]);
}
/*
* U300 platforms peripheral handling
*/
struct db_chip {
u16 chipid;
const char *name;
};
/*
* This is a list of the Digital Baseband chips used in the U300 platform.
*/
static struct db_chip db_chips[] __initdata = {
{
.chipid = 0xb800,
.name = "DB3000",
},
{
.chipid = 0xc000,
.name = "DB3100",
},
{
.chipid = 0xc800,
.name = "DB3150",
},
{
.chipid = 0xd800,
.name = "DB3200",
},
{
.chipid = 0xe000,
.name = "DB3250",
},
{
.chipid = 0xe800,
.name = "DB3210",
},
{
.chipid = 0xf000,
.name = "DB3350 P1x",
},
{
.chipid = 0xf100,
.name = "DB3350 P2x",
},
{
.chipid = 0x0000, /* List terminator */
.name = NULL,
}
};
static void __init u300_init_check_chip(void)
{
u16 val;
struct db_chip *chip;
const char *chipname;
const char unknown[] = "UNKNOWN";
/* Read out and print chip ID */
val = readw(U300_SYSCON_VBASE + U300_SYSCON_CIDR);
/* This is in funky big-endian order... */
val = (val & 0xFFU) << 8 | (val >> 8);
chip = db_chips;
chipname = unknown;
for ( ; chip->chipid; chip++) {
if (chip->chipid == (val & 0xFF00U)) {
chipname = chip->name;
break;
}
}
printk(KERN_INFO "Initializing U300 system on %s baseband chip " \
"(chip ID 0x%04x)\n", chipname, val);
#ifdef CONFIG_MACH_U300_BS26
if ((val & 0xFF00U) != 0xc800) {
printk(KERN_ERR "Platform configured for BS25/BS26 " \
"with DB3150 but %s detected, expect problems!",
chipname);
}
#endif
#ifdef CONFIG_MACH_U300_BS330
if ((val & 0xFF00U) != 0xd800) {
printk(KERN_ERR "Platform configured for BS330 " \
"with DB3200 but %s detected, expect problems!",
chipname);
}
#endif
#ifdef CONFIG_MACH_U300_BS335
if ((val & 0xFF00U) != 0xf000 && (val & 0xFF00U) != 0xf100) {
printk(KERN_ERR "Platform configured for BS365 " \
" with DB3350 but %s detected, expect problems!",
chipname);
}
#endif
#ifdef CONFIG_MACH_U300_BS365
if ((val & 0xFF00U) != 0xe800) {
printk(KERN_ERR "Platform configured for BS365 " \
"with DB3210 but %s detected, expect problems!",
chipname);
}
#endif
}
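/*
* Worked example (illustrative note, not part of the original file): the
* CIDR register is read in byte-swapped order, so the line above swaps it
* back before the db_chips[] lookup. If readw() returned 0x00c8, the swap
* yields val = 0xc800 and the loop matches the "DB3150" entry, since the
* comparison only looks at the high byte (val & 0xFF00).
*/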
/*
* Some devices and their resources require reserved physical memory from
* the end of the available RAM. This function traverses the list of devices
* and assigns actual addresses to these.
*/
static void __init u300_assign_physmem(void)
{
unsigned long curr_start = __pa(high_memory);
int i, j;
for (i = 0; i < ARRAY_SIZE(platform_devs); i++) {
for (j = 0; j < platform_devs[i]->num_resources; j++) {
struct resource *const res =
&platform_devs[i]->resource[j];
if (IORESOURCE_MEM == res->flags &&
0 == res->start) {
res->start = curr_start;
res->end += curr_start;
curr_start += (res->end - res->start + 1);
printk(KERN_INFO "core.c: Mapping RAM " \
"%#x-%#x to device %s:%s\n",
res->start, res->end,
platform_devs[i]->name, res->name);
}
}
}
}
void __init u300_init_devices(void)
{
int i;
u16 val;
/* Check what platform we run and print some status information */
u300_init_check_chip();
/* Set system to run at PLL208, max performance, a known state. */
val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
/* Wait for the PLL208 to lock if not locked in yet */
while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) &
U300_SYSCON_CSR_PLL208_LOCK_IND));
/* Initialize SPI device with some board specifics */
u300_spi_init(&pl022_device);
/* Register the AMBA devices in the AMBA bus abstraction layer */
u300_clock_primecells();
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
amba_device_register(d, &iomem_resource);
}
u300_unclock_primecells();
u300_assign_physmem();
/* Register subdevices on the I2C buses */
u300_i2c_register_board_devices();
/* Register subdevices on the SPI bus */
u300_spi_register_board_devices();
/* Register the platform devices */
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
#ifndef CONFIG_MACH_U300_SEMI_IS_SHARED
/*
* Enable SEMI self refresh. Self-refresh of the SDRAM is entered when
* both subsystems are requesting this mode.
* If we do not share the Acc SDRAM, this is never the case. Therefore
* enable it here from the App side.
*/
val = readw(U300_SYSCON_VBASE + U300_SYSCON_SMCR) |
U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE;
writew(val, U300_SYSCON_VBASE + U300_SYSCON_SMCR);
#endif /* CONFIG_MACH_U300_SEMI_IS_SHARED */
}
static int core_module_init(void)
{
/*
* This needs to be initialized later: it needs the input framework
* to be initialized first.
*/
return mmc_init(&mmcsd_device);
}
module_init(core_module_init);
| gpl-2.0 |
Dazzozo/huawei-kernel-3.4 | fs/fat/file.c | 3327 | 11218 | /*
* linux/fs/fat/file.c
*
* Written 1992,1993 by Werner Almesberger
*
* regular file handling primitives for fat-based filesystems
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include "fat.h"
static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
{
u32 attr;
mutex_lock(&inode->i_mutex);
attr = fat_make_attrs(inode);
mutex_unlock(&inode->i_mutex);
return put_user(attr, user_attr);
}
static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
int is_dir = S_ISDIR(inode->i_mode);
u32 attr, oldattr;
struct iattr ia;
int err;
err = get_user(attr, user_attr);
if (err)
goto out;
mutex_lock(&inode->i_mutex);
err = mnt_want_write_file(file);
if (err)
goto out_unlock_inode;
/*
* ATTR_VOLUME and ATTR_DIR cannot be changed; this also
* prevents the user from turning us into a VFAT
* longname entry. Also, we obviously can't set
* any of the NTFS attributes in the high 24 bits.
*/
attr &= 0xff & ~(ATTR_VOLUME | ATTR_DIR);
/* Merge in ATTR_VOLUME and ATTR_DIR */
attr |= (MSDOS_I(inode)->i_attrs & ATTR_VOLUME) |
(is_dir ? ATTR_DIR : 0);
oldattr = fat_make_attrs(inode);
/* Equivalent to a chmod() */
ia.ia_valid = ATTR_MODE | ATTR_CTIME;
ia.ia_ctime = current_fs_time(inode->i_sb);
if (is_dir)
ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO);
else {
ia.ia_mode = fat_make_mode(sbi, attr,
S_IRUGO | S_IWUGO | (inode->i_mode & S_IXUGO));
}
/* The root directory has no attributes */
if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
err = -EINVAL;
goto out_drop_write;
}
if (sbi->options.sys_immutable &&
((attr | oldattr) & ATTR_SYS) &&
!capable(CAP_LINUX_IMMUTABLE)) {
err = -EPERM;
goto out_drop_write;
}
/*
* The security check is questionable... We single
* out the RO attribute for checking by the security
* module, just because it maps to a file mode.
*/
err = security_inode_setattr(file->f_path.dentry, &ia);
if (err)
goto out_drop_write;
/* This MUST be done before doing anything irreversible... */
err = fat_setattr(file->f_path.dentry, &ia);
if (err)
goto out_drop_write;
fsnotify_change(file->f_path.dentry, ia.ia_valid);
if (sbi->options.sys_immutable) {
if (attr & ATTR_SYS)
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
}
fat_save_attrs(inode, attr);
mark_inode_dirty(inode);
out_drop_write:
mnt_drop_write_file(file);
out_unlock_inode:
mutex_unlock(&inode->i_mutex);
out:
return err;
}
long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_path.dentry->d_inode;
u32 __user *user_attr = (u32 __user *)arg;
switch (cmd) {
case FAT_IOCTL_GET_ATTRIBUTES:
return fat_ioctl_get_attributes(inode, user_attr);
case FAT_IOCTL_SET_ATTRIBUTES:
return fat_ioctl_set_attributes(filp, user_attr);
default:
return -ENOTTY; /* Inappropriate ioctl for device */
}
}
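/*
* Illustrative userspace usage (a sketch, not part of the original file).
* FAT_IOCTL_GET_ATTRIBUTES, FAT_IOCTL_SET_ATTRIBUTES and the ATTR_* flags
* come from <linux/msdos_fs.h>; error handling is omitted:
*
*	#include <sys/ioctl.h>
*	#include <linux/msdos_fs.h>
*
*	__u32 attr;
*	ioctl(fd, FAT_IOCTL_GET_ATTRIBUTES, &attr);	// read DOS attributes
*	attr |= ATTR_HIDDEN;				// e.g. set the hidden bit
*	ioctl(fd, FAT_IOCTL_SET_ATTRIBUTES, &attr);	// write them back
*
* Note that ATTR_VOLUME and ATTR_DIR are masked off by the handler above,
* so they cannot be changed this way.
*/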
#ifdef CONFIG_COMPAT
static long fat_generic_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
return fat_generic_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
MSDOS_SB(inode->i_sb)->options.flush) {
fat_flush_inodes(inode->i_sb, inode, NULL);
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
return 0;
}
int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
int res, err;
res = generic_file_fsync(filp, start, end, datasync);
err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
return res ? res : err;
}
const struct file_operations fat_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = fat_generic_compat_ioctl,
#endif
.fsync = fat_file_fsync,
.splice_read = generic_file_splice_read,
};
static int fat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = inode->i_size, count = size - inode->i_size;
int err;
err = generic_cont_expand_simple(inode, size);
if (err)
goto out;
inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
int err2;
/*
* Opencode syncing since we don't have a file open to use
* standard fsync path.
*/
err = filemap_fdatawrite_range(mapping, start,
start + count - 1);
err2 = sync_mapping_buffers(mapping);
if (!err)
err = err2;
err2 = write_inode_now(inode, 1);
if (!err)
err = err2;
if (!err) {
err = filemap_fdatawait_range(mapping, start,
start + count - 1);
}
}
out:
return err;
}
/* Free all clusters after the skip'th cluster. */
static int fat_free(struct inode *inode, int skip)
{
struct super_block *sb = inode->i_sb;
int err, wait, free_start, i_start, i_logstart;
if (MSDOS_I(inode)->i_start == 0)
return 0;
fat_cache_inval_inode(inode);
wait = IS_DIRSYNC(inode);
i_start = free_start = MSDOS_I(inode)->i_start;
i_logstart = MSDOS_I(inode)->i_logstart;
/* First, we write the new file size. */
if (!skip) {
MSDOS_I(inode)->i_start = 0;
MSDOS_I(inode)->i_logstart = 0;
}
MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
if (wait) {
err = fat_sync_inode(inode);
if (err) {
MSDOS_I(inode)->i_start = i_start;
MSDOS_I(inode)->i_logstart = i_logstart;
return err;
}
} else
mark_inode_dirty(inode);
/* Write a new EOF, and get the remaining cluster chain for freeing. */
if (skip) {
struct fat_entry fatent;
int ret, fclus, dclus;
ret = fat_get_cluster(inode, skip - 1, &fclus, &dclus);
if (ret < 0)
return ret;
else if (ret == FAT_ENT_EOF)
return 0;
fatent_init(&fatent);
ret = fat_ent_read(inode, &fatent, dclus);
if (ret == FAT_ENT_EOF) {
fatent_brelse(&fatent);
return 0;
} else if (ret == FAT_ENT_FREE) {
fat_fs_error(sb,
"%s: invalid cluster chain (i_pos %lld)",
__func__, MSDOS_I(inode)->i_pos);
ret = -EIO;
} else if (ret > 0) {
err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
if (err)
ret = err;
}
fatent_brelse(&fatent);
if (ret < 0)
return ret;
free_start = ret;
}
inode->i_blocks = skip << (MSDOS_SB(sb)->cluster_bits - 9);
/* Free the remaining cluster chain */
return fat_free_clusters(inode, free_start);
}
void fat_truncate_blocks(struct inode *inode, loff_t offset)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
const unsigned int cluster_size = sbi->cluster_size;
int nr_clusters;
/*
* This protects against truncating a file bigger than it was then
* trying to write into the hole.
*/
if (MSDOS_I(inode)->mmu_private > offset)
MSDOS_I(inode)->mmu_private = offset;
nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits;
fat_free(inode, nr_clusters);
fat_flush_inodes(inode->i_sb, inode, NULL);
}
int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = dentry->d_inode;
generic_fillattr(inode, stat);
stat->blksize = MSDOS_SB(inode->i_sb)->cluster_size;
return 0;
}
EXPORT_SYMBOL_GPL(fat_getattr);
static int fat_sanitize_mode(const struct msdos_sb_info *sbi,
struct inode *inode, umode_t *mode_ptr)
{
umode_t mask, perm;
/*
* Note, the basic check is already done by a caller of
* (attr->ia_mode & ~FAT_VALID_MODE)
*/
if (S_ISREG(inode->i_mode))
mask = sbi->options.fs_fmask;
else
mask = sbi->options.fs_dmask;
perm = *mode_ptr & ~(S_IFMT | mask);
/*
* Of the r and x bits, all (subject to umask) must be present. Of the
* w bits, either all (subject to umask) or none must be present.
*
* If fat_mode_can_hold_ro(inode) is false, can't change w bits.
*/
if ((perm & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO)))
return -EPERM;
if (fat_mode_can_hold_ro(inode)) {
if ((perm & S_IWUGO) && ((perm & S_IWUGO) != (S_IWUGO & ~mask)))
return -EPERM;
} else {
if ((perm & S_IWUGO) != (S_IWUGO & ~mask))
return -EPERM;
}
*mode_ptr &= S_IFMT | perm;
return 0;
}
static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
{
umode_t allow_utime = sbi->options.allow_utime;
if (current_fsuid() != inode->i_uid) {
if (in_group_p(inode->i_gid))
allow_utime >>= 3;
if (allow_utime & MAY_WRITE)
return 1;
}
/* use a default check */
return 0;
}
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
/* valid file mode bits */
#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
int fat_setattr(struct dentry *dentry, struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
struct inode *inode = dentry->d_inode;
unsigned int ia_valid;
int error;
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if (ia_valid & TIMES_SET_FLAGS) {
if (fat_allow_set_time(sbi, inode))
attr->ia_valid &= ~TIMES_SET_FLAGS;
}
error = inode_change_ok(inode, attr);
attr->ia_valid = ia_valid;
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
/*
* Expand the file. Since inode_setattr() updates ->i_size
* before calling the ->truncate(), but FAT needs to fill the
* hole before it. XXX: this is no longer true with new truncate
* sequence.
*/
if (attr->ia_valid & ATTR_SIZE) {
inode_dio_wait(inode);
if (attr->ia_size > inode->i_size) {
error = fat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
goto out;
attr->ia_valid &= ~ATTR_SIZE;
}
}
if (((attr->ia_valid & ATTR_UID) &&
(attr->ia_uid != sbi->options.fs_uid)) ||
((attr->ia_valid & ATTR_GID) &&
(attr->ia_gid != sbi->options.fs_gid)) ||
((attr->ia_valid & ATTR_MODE) &&
(attr->ia_mode & ~FAT_VALID_MODE)))
error = -EPERM;
if (error) {
if (sbi->options.quiet)
error = 0;
goto out;
}
/*
* We don't return -EPERM here. Yes, strange, but this preserves
* long-standing behavior.
*/
if (attr->ia_valid & ATTR_MODE) {
if (fat_sanitize_mode(sbi, inode, &attr->ia_mode) < 0)
attr->ia_valid &= ~ATTR_MODE;
}
if (attr->ia_valid & ATTR_SIZE) {
down_write(&MSDOS_I(inode)->truncate_lock);
truncate_setsize(inode, attr->ia_size);
fat_truncate_blocks(inode, attr->ia_size);
up_write(&MSDOS_I(inode)->truncate_lock);
}
setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
return error;
}
EXPORT_SYMBOL_GPL(fat_setattr);
const struct inode_operations fat_file_inode_operations = {
.setattr = fat_setattr,
.getattr = fat_getattr,
};
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_mondrianwifi | lib/bitmap.c | 3327 | 35502 | /*
* lib/bitmap.c
* Helper functions for bitmap.h.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/export.h>
#include <linux/thread_info.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <asm/uaccess.h>
/*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* These operations actually hold to a slightly stronger rule:
* if you don't input any bitmaps to these ops that have some
* unused bits set, then they won't output any set unused bits
* in output bitmaps.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
int __bitmap_empty(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_empty);
int __bitmap_full(const unsigned long *bitmap, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (~bitmap[k])
return 0;
if (bits % BITS_PER_LONG)
if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_full);
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_equal);
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
if (bits % BITS_PER_LONG)
dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
EXPORT_SYMBOL(__bitmap_complement);
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = (1UL << left) - 1;
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1 && left)
upper &= mask;
}
lower = src[off + k];
if (left && off + k == lim - 1)
lower &= mask;
dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
if (left && k == lim - 1)
dst[k] &= mask;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);
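/*
* Worked example (illustrative note, not part of the original file): for an
* 8-bit bitmap with bits 4, 5 and 7 set (word value 0xb0),
* __bitmap_shift_right(dst, src, 4, 8) moves those bits toward the LSB,
* leaving bits 0, 1 and 3 set in dst (word value 0x0b); any bits shifted
* off the bottom are simply lost.
*/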
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @bits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits)
{
int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1];
else
lower = 0;
upper = src[k];
if (left && k == lim - 1)
upper &= (1UL << left) - 1;
dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
if (left && k + off == lim - 1)
dst[k + off] &= (1UL << left) - 1;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k;
int nr = BITS_TO_LONGS(bits);
unsigned long result = 0;
for (k = 0; k < nr; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return 1;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 1;
return 0;
}
EXPORT_SYMBOL(__bitmap_intersects);
int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits)
{
int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return 0;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return 0;
return 1;
}
EXPORT_SYMBOL(__bitmap_subset);
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
if (bits % BITS_PER_LONG)
w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
return w;
}
EXPORT_SYMBOL(__bitmap_weight);
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
void bitmap_clear(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(bitmap_clear);
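/*
* Illustrative usage sketch (not part of the original file):
*
*	DECLARE_BITMAP(map, 64);
*
*	bitmap_zero(map, 64);
*	bitmap_set(map, 4, 8);		// sets bits 4..11 inclusive
*	bitmap_clear(map, 6, 2);	// clears bits 6 and 7 again
*
* After this, test_bit(5, map) is true and test_bit(6, map) is false.
*/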
/**
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
* @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of all zero areas this function finds plus @align_offset
* is a multiple of that power of 2.
*/
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
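/*
* Illustrative usage sketch (not part of the original file): allocators
* such as CMA use this, via the bitmap_find_next_zero_area() wrapper, to
* carve out an aligned run of free bits. With an empty 64-bit map, asking
* for 4 zero bits aligned to 4 while starting the search at bit 3:
*
*	DECLARE_BITMAP(map, 64);
*	unsigned long idx;
*
*	bitmap_zero(map, 64);
*	idx = bitmap_find_next_zero_area_off(map, 64, 3, 4, 3, 0);
*
* returns 4, the first 4-bit-aligned position at or after bit 3; a return
* value >= 64 would mean no suitable area was found.
*/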
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
*/
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
* bitmap_scnprintf - convert bitmap to an ASCII hex string.
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
* comma-separated sets of eight digits per set.
*/
int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int i, word, bit, len = 0;
unsigned long val;
const char *sep = "";
int chunksz;
u32 chunkmask;
chunksz = nmaskbits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (maskp[word] >> bit) & chunkmask;
len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
(chunksz+3)/4, val);
chunksz = CHUNKSZ;
sep = ",";
}
return len;
}
EXPORT_SYMBOL(bitmap_scnprintf);
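/*
* Worked example (illustrative note, not part of the original file): for a
* 64-bit bitmap with only bit 0 set, bitmap_scnprintf() emits
* "00000000,00000001" - two comma-separated chunks of eight hex digits,
* most significant chunk first. For a 16-bit bitmap holding 0x1234 the
* output is simply "1234".
*/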
/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
* than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
* then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
int __bitmap_parse(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
int nmaskbits)
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
const char __user __force *ubuf = (const char __user __force *)buf;
bitmap_zero(maskp, nmaskbits);
nchunks = nbits = totaldigits = c = 0;
do {
chunk = ndigits = 0;
/* Get the next chunk of the bitmap */
while (buflen) {
old_c = c;
if (is_user) {
if (__get_user(c, ubuf++))
return -EFAULT;
}
else
c = *buf++;
buflen--;
if (isspace(c))
continue;
/*
* If the last character was a space and the current
* character isn't '\0', we've got embedded whitespace.
* This is a no-no, so throw an error.
*/
if (totaldigits && c && isspace(old_c))
return -EINVAL;
/* A '\0' or a ',' signal the end of the chunk */
if (c == '\0' || c == ',')
break;
if (!isxdigit(c))
return -EINVAL;
/*
* Make sure there are at least 4 free bits in 'chunk'.
* If not, this hexdigit will overflow 'chunk', so
* throw an error.
*/
if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
return -EOVERFLOW;
chunk = (chunk << 4) | hex_to_bin(c);
ndigits++; totaldigits++;
}
if (ndigits == 0)
return -EINVAL;
if (nchunks == 0 && chunk == 0)
continue;
__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
*maskp |= chunk;
nchunks++;
nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
if (nbits > nmaskbits)
return -EOVERFLOW;
} while (buflen && c == ',');
return 0;
}
EXPORT_SYMBOL(__bitmap_parse);
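/*
* Worked example (illustrative note, not part of the original file):
* parsing the string "1234" into a 16-bit bitmap sets bits 2, 4, 5, 9 and
* 12 (the word value 0x1234), while "000f,00000000" on a 64-bit bitmap
* sets bits 32..35 - each comma-separated chunk supplies the next 32 bits,
* most significant chunk first.
*/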
/**
* bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Wrapper for __bitmap_parse(), providing it with user buffer.
*
* We cannot have this as an inline function in bitmap.h because it needs
* linux/uaccess.h to get the access_ok() declaration and this causes
* cyclic dependencies.
*/
int bitmap_parse_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
return __bitmap_parse((const char __force *)ubuf,
ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parse_user);
/*
* bscnl_emit(buf, buflen, rbot, rtop, bp)
*
* Helper routine for bitmap_scnlistprintf(). Write decimal number
* or range to buf, suppressing output past buf+buflen, with optional
* comma-prefix. Return len of what would be written to buf, if it
* all fit.
*/
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
if (len > 0)
len += scnprintf(buf + len, buflen - len, ",");
if (rbot == rtop)
len += scnprintf(buf + len, buflen - len, "%d", rbot);
else
len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
return len;
}
/**
* bitmap_scnlistprintf - convert bitmap to list format ASCII string
* @buf: byte buffer into which string is placed
* @buflen: reserved size of @buf, in bytes
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Output format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range. Output format is compatible with the format
* accepted as input by bitmap_parselist().
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing '\0', as
* per ISO C99.
*/
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
{
int len = 0;
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
int cur, rbot, rtop;
if (buflen == 0)
return 0;
buf[0] = 0;
rbot = cur = find_first_bit(maskp, nmaskbits);
while (cur < nmaskbits) {
rtop = cur;
cur = find_next_bit(maskp, nmaskbits, cur+1);
if (cur >= nmaskbits || cur > rtop + 1) {
len = bscnl_emit(buf, buflen, rbot, rtop, len);
rbot = cur;
}
}
return len;
}
EXPORT_SYMBOL(bitmap_scnlistprintf);
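/*
* Worked example (illustrative note, not part of the original file): for a
* bitmap with bits 0, 1, 2, 3 and 8 set, bitmap_scnlistprintf() produces
* "0-3,8" - consecutive runs are collapsed into "first-last" ranges and
* isolated bits are printed on their own.
*/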
/**
* __bitmap_parselist - convert list format ASCII string to bitmap
* @buf: read nul-terminated user string from this buffer
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @is_user: location of buffer, 0 indicates kernel space
* @maskp: write resulting mask here
* @nmaskbits: number of bits in mask to be written
*
* Input format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range.
*
* Returns 0 on success, -errno on invalid input strings.
* Error values:
* %-EINVAL: second number in range smaller than first
* %-EINVAL: invalid character in string
* %-ERANGE: bit number specified too large for mask
*/
static int __bitmap_parselist(const char *buf, unsigned int buflen,
int is_user, unsigned long *maskp,
int nmaskbits)
{
unsigned a, b;
int c, old_c, totaldigits;
const char __user __force *ubuf = (const char __user __force *)buf;
int exp_digit, in_range;
totaldigits = c = 0;
bitmap_zero(maskp, nmaskbits);
do {
exp_digit = 1;
in_range = 0;
a = b = 0;
/* Get the next cpu# or a range of cpu#'s */
while (buflen) {
old_c = c;
if (is_user) {
if (__get_user(c, ubuf++))
return -EFAULT;
} else
c = *buf++;
buflen--;
if (isspace(c))
continue;
/*
* If the last character was a space and the current
* character isn't '\0', we've got embedded whitespace.
* This is a no-no, so throw an error.
*/
if (totaldigits && c && isspace(old_c))
return -EINVAL;
/* A '\0' or a ',' signals the end of a cpu# or range */
if (c == '\0' || c == ',')
break;
if (c == '-') {
if (exp_digit || in_range)
return -EINVAL;
b = 0;
in_range = 1;
exp_digit = 1;
continue;
}
if (!isdigit(c))
return -EINVAL;
b = b * 10 + (c - '0');
if (!in_range)
a = b;
exp_digit = 0;
totaldigits++;
}
if (!(a <= b))
return -EINVAL;
if (b >= nmaskbits)
return -ERANGE;
while (a <= b) {
set_bit(a, maskp);
a++;
}
} while (buflen && c == ',');
return 0;
}
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
char *nl = strchr(bp, '\n');
int len;
if (nl)
len = nl - bp;
else
len = strlen(bp);
return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parselist);
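/*
 * Usage sketch (illustrative; the string and the 32-bit size are
 * assumptions): parsing a list-format string such as those written to
 * cpuset or irq affinity files.
 *
 *	DECLARE_BITMAP(cpus, 32);
 *	int err;
 *
 *	err = bitmap_parselist("0-3,8,12-15", cpus, 32);
 *	if (err)
 *		return err;	// -EINVAL on bad syntax, -ERANGE if a bit >= 32
 *	// bits 0-3, 8 and 12-15 are now set in 'cpus'
 */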
/**
 * bitmap_parselist_user - convert list format ASCII string in a user buffer into a bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
 * Wrapper for __bitmap_parselist(), providing it with user buffer.
*
* We cannot have this as an inline function in bitmap.h because it needs
* linux/uaccess.h to get the access_ok() declaration and this causes
* cyclic dependencies.
*/
int bitmap_parselist_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
if (!access_ok(VERIFY_READ, ubuf, ulen))
return -EFAULT;
return __bitmap_parselist((const char __force *)ubuf,
ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parselist_user);
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @bits)
* @bits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @bits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
 * and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
 * The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
int i, ord;
if (pos < 0 || pos >= bits || !test_bit(pos, buf))
return -1;
i = find_first_bit(buf, bits);
ord = 0;
while (i < pos) {
i = find_next_bit(buf, bits, i + 1);
ord++;
}
BUG_ON(i != pos);
return ord;
}
/**
* bitmap_ord_to_pos - find position of n-th set bit in bitmap
* @buf: pointer to bitmap
* @ord: ordinal bit position (n-th set bit, n >= 0)
* @bits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
* Value of @ord should be in range 0 <= @ord < weight(buf), else
* results are undefined.
*
* If for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
* and all other @ord values return undefined values. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
 * The bit positions 0 through @bits - 1 are valid positions in @buf.
*/
int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
int pos = 0;
if (ord >= 0 && ord < bits) {
int i;
for (i = find_first_bit(buf, bits);
i < bits && ord > 0;
i = find_next_bit(buf, bits, i + 1))
ord--;
if (i < bits && ord == 0)
pos = i;
}
return pos;
}
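/*
 * Worked example (added for illustration): with only bits 4-7 set,
 * bitmap_pos_to_ord() and bitmap_ord_to_pos() invert each other on the
 * set positions.
 *
 *	DECLARE_BITMAP(b, 16);
 *
 *	bitmap_zero(b, 16);
 *	bitmap_set(b, 4, 4);			// bits 4,5,6,7
 *	bitmap_pos_to_ord(b, 7, 16);		// returns 3 (4th set bit)
 *	bitmap_pos_to_ord(b, 2, 16);		// returns -1 (bit 2 not set)
 *	bitmap_ord_to_pos(b, 3, 16);		// returns 7
 */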
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
* @dst: remapped result
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* If either of the @old and @new bitmaps are empty, or if @src and
* @dst point to the same location, then this routine copies @src
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
 * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
 * For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @src comes into this routine
* with bits 1, 5 and 7 set, then @dst should leave with bits 1,
* 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
int bits)
{
int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, bits);
w = bitmap_weight(new, bits);
for_each_set_bit(oldbit, src, bits) {
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
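/*
 * Usage sketch (hypothetical values, added for illustration), matching
 * the example in the comment above: @old = {4..7}, @new = {12..15}, so
 * set bits of @src in positions 4-7 move up by 8 and everything else
 * maps to itself.
 *
 *	DECLARE_BITMAP(src, 32);
 *	DECLARE_BITMAP(dst, 32);
 *	DECLARE_BITMAP(old, 32);
 *	DECLARE_BITMAP(new, 32);
 *
 *	bitmap_zero(src, 32); bitmap_zero(old, 32); bitmap_zero(new, 32);
 *	bitmap_set(old, 4, 4);			// old = {4,5,6,7}
 *	bitmap_set(new, 12, 4);			// new = {12,13,14,15}
 *	set_bit(1, src); set_bit(5, src); set_bit(7, src);
 *	bitmap_remap(dst, src, old, new, 32);
 *	// dst now has bits 1, 13 and 15 set
 */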
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
* @oldbit: bit position to be mapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
 * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
*
 * For example, let's say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
* bit positions unchanged. So if say @oldbit is 5, then this routine
* returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
int w = bitmap_weight(new, bits);
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
return oldbit;
else
return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
* @orig: original untranslated bitmap
* @relmap: bitmap relative to which translated
* @bits: number of bits in each of these bitmaps
*
* Set the n-th bit of @dst iff there exists some m such that the
* n-th bit of @relmap is set, the m-th bit of @orig is set, and
* the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 * (If you understood the previous sentence the first time you
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
 * using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
* weight of (number of set bits in) @relmap are mapped nowhere.
* In particular, if for all bits m set in @orig, m >= W, then
* @dst will end up empty. In situations where the possibility
* of such an empty result is not desired, one way to avoid it is
* to use the bitmap_fold() operator, below, to first fold the
* @orig bitmap over itself so that all its set bits x are in the
* range 0 <= x < W. The bitmap_fold() operator does this by
* setting the bit (m % W) in @dst, for each bit (m) set in @orig.
*
* Example [1] for bitmap_onto():
* Let's say @relmap has bits 30-39 set, and @orig has bits
* 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
* @dst will have bits 31, 33, 35, 37 and 39 set.
*
* When bit 0 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the first bit (if any)
* that is turned on in @relmap. Since bit 0 was off in the
* above example, we leave off that bit (bit 30) in @dst.
*
* When bit 1 is set in @orig (as in the above example), it
* means turn on the bit in @dst corresponding to whatever
* is the second bit that is turned on in @relmap. The second
* bit in @relmap that was turned on in the above example was
* bit 31, so we turned on bit 31 in @dst.
*
* Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
* because they were the 4th, 6th, 8th and 10th set bits
* set in @relmap, and the 4th, 6th, 8th and 10th bits of
* @orig (i.e. bits 3, 5, 7 and 9) were also set.
*
* When bit 11 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the twelfth bit that is
* turned on in @relmap. In the above example, there were
 * only ten bits turned on in @relmap (30..39), so the fact that bit
 * 11 was set in @orig had no effect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
* Let's say @relmap has these ten bits set:
* 40 41 42 43 45 48 53 61 74 95
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
 * Further let's say we use the following code, invoking
 * bitmap_fold() then bitmap_onto(), as suggested above to
 * avoid the possibility of an empty @dst result:
*
* unsigned long *tmp; // a temporary bitmap's bits
*
* bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
* bitmap_onto(dst, tmp, relmap, bits);
*
* Then this table shows what various values of @dst would be, for
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
* (the weight of @relmap).
*
 *      @orig           tmp            @dst
 *      0               0              40
 *      1               1              41
 *      9               9              95
 *      10              0              40 (*)
 *      1 3 5 7         1 3 5 7        41 43 48 61
 *      0 1 2 3 4       0 1 2 3 4      40 41 42 43 45
 *      0 9 18 27       0 9 8 7        40 61 74 95
 *      0 10 20 30      0              40
 *      0 11 22 33      0 1 2 3        40 41 42 43
 *      0 12 24 36      0 2 4 6        40 42 45 53
 *      78 102 211      1 2 8          41 42 74 (*)
*
* (*) For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
* will be returned empty.
*
* If (as explained above) the only set bits in @orig are in positions
* m where m >= W, (where W is the weight of @relmap) then @dst will
* once again be returned empty.
*
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits)
{
int n, m; /* same meaning as in above comment */
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
/*
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
* n = bitmap_ord_to_pos(orig, m, bits);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
*/
m = 0;
for_each_set_bit(n, relmap, bits) {
/* m == bitmap_pos_to_ord(relmap, n, bits) */
if (test_bit(m, orig))
set_bit(n, dst);
m++;
}
}
EXPORT_SYMBOL(bitmap_onto);
/**
* bitmap_fold - fold larger bitmap into smaller, modulo specified size
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
* @bits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits)
{
int oldbit;
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
for_each_set_bit(oldbit, orig, bits)
set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);
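/*
 * Usage sketch (added for illustration) of the bitmap_fold() +
 * bitmap_onto() pairing suggested in Example [2] above. The bitmap
 * names and the 128-bit size are assumptions; only the call pattern
 * matters.
 *
 *	DECLARE_BITMAP(tmp, 128);
 *	DECLARE_BITMAP(dst, 128);
 *
 *	// fold 'orig' modulo the weight of 'relmap', then translate
 *	bitmap_fold(tmp, orig, bitmap_weight(relmap, 128), 128);
 *	bitmap_onto(dst, tmp, relmap, 128);
 *	// every set bit of 'orig' now lands on some set bit of 'relmap'
 */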
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
* pos: the beginning of the region
* order: region size (log base 2 of number of bits)
* reg_op: operation(s) to perform on that region of bitmap
*
* Can set, verify and/or release a region of bits in a bitmap,
* depending on which combination of REG_OP_* flag bits is set.
*
* A region of a bitmap is a sequence of bits in the bitmap, of
* some size '1 << order' (a power of two), aligned to that same
* '1 << order' power of two.
*
* Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
 * Returns 0 in all other cases and for all other reg_op values.
*/
enum {
REG_OP_ISFREE, /* true if region is all zero bits */
REG_OP_ALLOC, /* set all bits in region */
REG_OP_RELEASE, /* clear all bits in region */
};
static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
int offset; /* bit offset region in bitmap[index] */
int nlongs_reg; /* num longs spanned by region in bitmap */
int nbitsinlong; /* num bits of region in each spanned long */
unsigned long mask; /* bitmask for one long of region */
int i; /* scans bitmap by longs */
int ret = 0; /* return value */
/*
* Either nlongs_reg == 1 (for small orders that fit in one long)
* or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
*/
nbits_reg = 1 << order;
index = pos / BITS_PER_LONG;
offset = pos - (index * BITS_PER_LONG);
nlongs_reg = BITS_TO_LONGS(nbits_reg);
nbitsinlong = min(nbits_reg, BITS_PER_LONG);
/*
* Can't do "mask = (1UL << nbitsinlong) - 1", as that
* overflows if nbitsinlong == BITS_PER_LONG.
*/
mask = (1UL << (nbitsinlong - 1));
mask += mask - 1;
mask <<= offset;
switch (reg_op) {
case REG_OP_ISFREE:
for (i = 0; i < nlongs_reg; i++) {
if (bitmap[index + i] & mask)
goto done;
}
ret = 1; /* all bits in region free (zero) */
break;
case REG_OP_ALLOC:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] |= mask;
break;
case REG_OP_RELEASE:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] &= ~mask;
break;
}
done:
return ret;
}
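/*
 * Worked example (added for clarity; values chosen for illustration):
 * for pos = 8, order = 3 on a 64-bit machine __reg_op() computes
 *
 *	nbits_reg   = 1 << 3      = 8
 *	index       = 8 / 64      = 0
 *	offset      = 8 - 0 * 64  = 8
 *	nlongs_reg  = 1
 *	nbitsinlong = min(8, 64)  = 8
 *	mask        = 0xff << 8   = 0xff00
 *
 * so REG_OP_ISFREE tests bitmap[0] & 0xff00, REG_OP_ALLOC sets those
 * eight bits, and REG_OP_RELEASE clears them. For regions larger than
 * one long the mask becomes ~0UL and the loop walks nlongs_reg words.
 */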
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size (log base 2 of number of bits) to find
*
* Find a region of free (zero) bits in a @bitmap of @bits bits and
* allocate them (set them to one). Only consider regions of length
* a power (@order) of two, aligned to that power of two, which
* makes the search algorithm much faster.
*
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
int pos, end; /* scans bitmap by regions of size order */
for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return pos;
}
return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);
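/*
 * Usage sketch (illustrative; 'pool' and the sizes are assumptions):
 * grabbing four aligned bits from an allocation bitmap.
 *
 *	DECLARE_BITMAP(pool, 64);
 *	int pos;
 *
 *	pos = bitmap_find_free_region(pool, 64, 2);	// 1 << 2 = 4 bits
 *	if (pos < 0)
 *		return pos;			// -ENOMEM: no free region
 *	// bits pos .. pos+3 are now set, and pos is a multiple of 4
 */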
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
 * This is the complement to bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*
* No return value.
*/
void bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
EXPORT_SYMBOL(bitmap_release_region);
/**
* bitmap_allocate_region - allocate bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to allocate
* @order: region size (log base 2 of number of bits) to allocate
*
* Allocate (set bits in) a specified region of a bitmap.
*
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return 0;
}
EXPORT_SYMBOL(bitmap_allocate_region);
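/*
 * Usage sketch (illustrative; 'pool' and the offsets are assumptions):
 * reserving a specific, properly aligned region and releasing it later.
 *
 *	if (bitmap_allocate_region(pool, 16, 2))	// claim bits 16-19
 *		return -EBUSY;				// region already in use
 *	...
 *	bitmap_release_region(pool, 16, 2);		// free bits 16-19 again
 */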
/**
* bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
* @dst: destination buffer
* @src: bitmap to copy
* @nbits: number of bits in the bitmap
*
* Require nbits % BITS_PER_LONG == 0.
*/
void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
{
unsigned long *d = dst;
int i;
for (i = 0; i < nbits/BITS_PER_LONG; i++) {
if (BITS_PER_LONG == 64)
d[i] = cpu_to_le64(src[i]);
else
d[i] = cpu_to_le32(src[i]);
}
}
EXPORT_SYMBOL(bitmap_copy_le);
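/*
 * Usage sketch (illustrative; the buffer name is an assumption):
 * producing a little-endian image of a bitmap, e.g. before handing it
 * to hardware or putting it on the wire.
 *
 *	DECLARE_BITMAP(map, 64);	// 64 is a multiple of BITS_PER_LONG
 *	u8 wire[64 / 8];		// raw buffer consumed by the device
 *
 *	bitmap_copy_le(wire, map, 64);	// wire[] is little-endian on any host
 */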
| gpl-2.0 |
MinimalOS-CAF/kernel_huawei_angler | arch/um/kernel/ksyms.c | 3583 | 1159 | /*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/module.h>
#include <os.h>
EXPORT_SYMBOL(set_signals);
EXPORT_SYMBOL(get_signals);
EXPORT_SYMBOL(os_stat_fd);
EXPORT_SYMBOL(os_stat_file);
EXPORT_SYMBOL(os_access);
EXPORT_SYMBOL(os_set_exec_close);
EXPORT_SYMBOL(os_getpid);
EXPORT_SYMBOL(os_open_file);
EXPORT_SYMBOL(os_read_file);
EXPORT_SYMBOL(os_write_file);
EXPORT_SYMBOL(os_seek_file);
EXPORT_SYMBOL(os_lock_file);
EXPORT_SYMBOL(os_ioctl_generic);
EXPORT_SYMBOL(os_pipe);
EXPORT_SYMBOL(os_file_type);
EXPORT_SYMBOL(os_file_mode);
EXPORT_SYMBOL(os_file_size);
EXPORT_SYMBOL(os_flush_stdout);
EXPORT_SYMBOL(os_close_file);
EXPORT_SYMBOL(os_set_fd_async);
EXPORT_SYMBOL(os_set_fd_block);
EXPORT_SYMBOL(helper_wait);
EXPORT_SYMBOL(os_shutdown_socket);
EXPORT_SYMBOL(os_create_unix_socket);
EXPORT_SYMBOL(os_connect_socket);
EXPORT_SYMBOL(os_accept_connection);
EXPORT_SYMBOL(os_rcv_fd);
EXPORT_SYMBOL(run_helper);
EXPORT_SYMBOL(os_major);
EXPORT_SYMBOL(os_minor);
EXPORT_SYMBOL(os_makedev);
EXPORT_SYMBOL(add_sigio_fd);
EXPORT_SYMBOL(ignore_sigio_fd);
EXPORT_SYMBOL(sigio_broken);
| gpl-2.0 |
sev3n85/android_kernel_samsung_s3ve3g | net/netfilter/nf_log.c | 4607 | 7370 | #include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/protocol.h>
#include <net/netfilter/nf_log.h>
#include "nf_internals.h"
/* Internal logging interface, which relies on the real
LOG target modules */
#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
static DEFINE_MUTEX(nf_log_mutex);
static struct nf_logger *__find_logger(int pf, const char *str_logger)
{
struct nf_logger *t;
list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) {
if (!strnicmp(str_logger, t->name, strlen(t->name)))
return t;
}
return NULL;
}
/* return EEXIST if the same logger is registered, 0 on success. */
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
const struct nf_logger *llog;
int i;
if (pf >= ARRAY_SIZE(nf_loggers))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(logger->list); i++)
INIT_LIST_HEAD(&logger->list[i]);
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
} else {
/* register at end of list to honor first register win */
list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
llog = rcu_dereference_protected(nf_loggers[pf],
lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
rcu_assign_pointer(nf_loggers[pf], logger);
}
mutex_unlock(&nf_log_mutex);
return 0;
}
EXPORT_SYMBOL(nf_log_register);
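/*
 * Usage sketch (illustrative, not part of the original file): how a
 * logging backend might register itself for IPv4. The structure name
 * and the 'example_logfn' callback are assumptions for this example.
 *
 *	static struct nf_logger example_logger = {
 *		.name	= "nf_log_example",
 *		.logfn	= example_logfn,
 *		.me	= THIS_MODULE,
 *	};
 *
 *	// typically from the module init function:
 *	nf_log_register(NFPROTO_IPV4, &example_logger);
 *	...
 *	// and from the exit path:
 *	nf_log_unregister(&example_logger);
 */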
void nf_log_unregister(struct nf_logger *logger)
{
const struct nf_logger *c_logger;
int i;
mutex_lock(&nf_log_mutex);
for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) {
c_logger = rcu_dereference_protected(nf_loggers[i],
lockdep_is_held(&nf_log_mutex));
if (c_logger == logger)
RCU_INIT_POINTER(nf_loggers[i], NULL);
list_del(&logger->list[i]);
}
mutex_unlock(&nf_log_mutex);
synchronize_rcu();
}
EXPORT_SYMBOL(nf_log_unregister);
int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
{
if (pf >= ARRAY_SIZE(nf_loggers))
return -EINVAL;
mutex_lock(&nf_log_mutex);
if (__find_logger(pf, logger->name) == NULL) {
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
rcu_assign_pointer(nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
EXPORT_SYMBOL(nf_log_bind_pf);
void nf_log_unbind_pf(u_int8_t pf)
{
if (pf >= ARRAY_SIZE(nf_loggers))
return;
mutex_lock(&nf_log_mutex);
RCU_INIT_POINTER(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unbind_pf);
void nf_log_packet(u_int8_t pf,
unsigned int hooknum,
const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const struct nf_loginfo *loginfo,
const char *fmt, ...)
{
va_list args;
char prefix[NF_LOG_PREFIXLEN];
const struct nf_logger *logger;
rcu_read_lock();
logger = rcu_dereference(nf_loggers[pf]);
if (logger) {
va_start(args, fmt);
vsnprintf(prefix, sizeof(prefix), fmt, args);
va_end(args);
logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(nf_log_packet);
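/*
 * Usage sketch (illustrative): emitting a log line from a netfilter
 * hook through whichever logger is currently bound for the protocol
 * family. Passing a NULL loginfo lets the backend apply its defaults;
 * the prefix string here is an assumption.
 *
 *	nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
 *		      "example-drop: ");
 */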
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&nf_log_mutex);
if (*pos >= ARRAY_SIZE(nf_loggers))
return NULL;
return pos;
}
static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(nf_loggers))
return NULL;
return pos;
}
static void seq_stop(struct seq_file *s, void *v)
{
mutex_unlock(&nf_log_mutex);
}
static int seq_show(struct seq_file *s, void *v)
{
loff_t *pos = v;
const struct nf_logger *logger;
struct nf_logger *t;
int ret;
logger = rcu_dereference_protected(nf_loggers[*pos],
lockdep_is_held(&nf_log_mutex));
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
else
ret = seq_printf(s, "%2lld %s (", *pos, logger->name);
if (ret < 0)
return ret;
list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
ret = seq_printf(s, "%s", t->name);
if (ret < 0)
return ret;
if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
ret = seq_printf(s, ",");
if (ret < 0)
return ret;
}
}
return seq_printf(s, ")\n");
}
static const struct seq_operations nflog_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = seq_show,
};
static int nflog_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nflog_seq_ops);
}
static const struct file_operations nflog_file_ops = {
.owner = THIS_MODULE,
.open = nflog_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL
static struct ctl_path nf_log_sysctl_path[] = {
{ .procname = "net", },
{ .procname = "netfilter", },
{ .procname = "nf_log", },
{ }
};
static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
static struct ctl_table_header *nf_log_dir_header;
static int nf_log_proc_dostring(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
char buf[NFLOGGER_NAME_LEN];
size_t size = *lenp;
int r = 0;
int tindex = (unsigned long)table->extra1;
if (write) {
		if (size > sizeof(buf) - 1)
			size = sizeof(buf) - 1;
		if (copy_from_user(buf, buffer, size))
			return -EFAULT;
		/* the user buffer need not be NUL-terminated */
		buf[size] = '\0';
if (!strcmp(buf, "NONE")) {
nf_log_unbind_pf(tindex);
return 0;
}
mutex_lock(&nf_log_mutex);
logger = __find_logger(tindex, buf);
if (logger == NULL) {
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
rcu_assign_pointer(nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
logger = rcu_dereference_protected(nf_loggers[tindex],
lockdep_is_held(&nf_log_mutex));
if (!logger)
table->data = "NONE";
else
table->data = logger->name;
r = proc_dostring(table, write, buffer, lenp, ppos);
mutex_unlock(&nf_log_mutex);
}
return r;
}
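/*
 * Usage sketch (illustrative): with CONFIG_SYSCTL this handler backs the
 * per-protocol-family files under /proc/sys/net/netfilter/nf_log/.
 * Writing a logger name binds it, writing "NONE" unbinds it, e.g. from
 * a shell (2 is NFPROTO_IPV4; the logger name is an example):
 *
 *	# cat /proc/sys/net/netfilter/nf_log/2
 *	NONE
 *	# echo nfnetlink_log > /proc/sys/net/netfilter/nf_log/2
 *	# echo NONE > /proc/sys/net/netfilter/nf_log/2
 */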
static __init int netfilter_log_sysctl_init(void)
{
int i;
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i);
nf_log_sysctl_table[i].procname =
nf_log_sysctl_fnames[i-NFPROTO_UNSPEC];
nf_log_sysctl_table[i].data = NULL;
nf_log_sysctl_table[i].maxlen =
NFLOGGER_NAME_LEN * sizeof(char);
nf_log_sysctl_table[i].mode = 0644;
nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring;
nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
}
nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path,
nf_log_sysctl_table);
if (!nf_log_dir_header)
return -ENOMEM;
return 0;
}
#else
static __init int netfilter_log_sysctl_init(void)
{
return 0;
}
#endif /* CONFIG_SYSCTL */
int __init netfilter_log_init(void)
{
int i, r;
#ifdef CONFIG_PROC_FS
if (!proc_create("nf_log", S_IRUGO,
proc_net_netfilter, &nflog_file_ops))
return -1;
#endif
/* Errors will trigger panic, unroll on error is unnecessary. */
r = netfilter_log_sysctl_init();
if (r < 0)
return r;
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
INIT_LIST_HEAD(&(nf_loggers_l[i]));
return 0;
}
| gpl-2.0 |