repo_name
string
path
string
copies
string
size
string
content
string
license
string
nazunamoe/android_kernel_lge_v4xx
arch/arm/mach-mmp/mmp2.c
4866
6769
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include <mach/addr-map.h> #include <mach/regs-apbc.h> #include <mach/regs-apmu.h> #include <mach/cputype.h> #include <mach/irqs.h> #include <mach/dma.h> #include <mach/mfp.h> #include <mach/devices.h> #include <mach/mmp2.h> #include "common.h" #include "clock.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, }; void mmp2_clear_pmic_int(void) { void 
__iomem *mfpr_pmic; unsigned long data; mfpr_pmic = APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } void __init mmp2_init_irq(void) { mmp2_init_icu(); } static void sdhc_clk_enable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst |= clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } static void sdhc_clk_disable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst &= ~clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } struct clkops sdhc_clk_ops = { .enable = sdhc_clk_enable, .disable = sdhc_clk_disable, }; /* APB peripheral clocks */ static APBC_CLK(uart1, MMP2_UART1, 1, 26000000); static APBC_CLK(uart2, MMP2_UART2, 1, 26000000); static APBC_CLK(uart3, MMP2_UART3, 1, 26000000); static APBC_CLK(uart4, MMP2_UART4, 1, 26000000); static APBC_CLK(twsi1, MMP2_TWSI1, 0, 26000000); static APBC_CLK(twsi2, MMP2_TWSI2, 0, 26000000); static APBC_CLK(twsi3, MMP2_TWSI3, 0, 26000000); static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000); static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000); static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000); static APBC_CLK(gpio, MMP2_GPIO, 0, 26000000); static APMU_CLK(nand, NAND, 0xbf, 100000000); static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops); static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL), INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL), INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL), INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL), INIT_CLKREG(&clk_twsi5, 
"pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), INIT_CLKREG(&clk_gpio, "pxa-gpio", NULL), INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"), }; static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16); clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); } return 0; } postcore_initcall(mmp2_init); static void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_MMP2_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_MMP2_TIMERS); timer_init(IRQ_MMP2_TIMER1); } struct sys_timer mmp2_timer = { .init = mmp2_timer_init, }; /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); 
MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000); /* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */ MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000); struct resource mmp2_resource_gpio[] = { { .start = 0xd4019000, .end = 0xd4019fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_MMP2_GPIO, .end = IRQ_MMP2_GPIO, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device mmp2_device_gpio = { .name = "pxa-gpio", .id = -1, .num_resources = ARRAY_SIZE(mmp2_resource_gpio), .resource = mmp2_resource_gpio, };
gpl-2.0
curbthepain/revkernel_us990
drivers/media/video/s5p-mfc/s5p_mfc_intr.c
5634
2505
/*
 * drivers/media/video/samsung/mfc5/s5p_mfc_intr.c
 *
 * C file for Samsung MFC (Multi Function Codec - FIMV) driver
 * This file contains functions used to wait for command completion.
 *
 * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include "regs-mfc.h"
#include "s5p_mfc_common.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_intr.h"

/*
 * Sleep on the device-wide queue until the interrupt handler reports
 * completion of @command (or a hardware error).  Returns 0 on normal
 * completion, 1 on timeout, signal delivery or hardware error.
 */
int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
{
	int ret;

	ret = wait_event_interruptible_timeout(dev->queue,
		dev->int_cond && (dev->int_type == command ||
				  dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET),
		msecs_to_jiffies(MFC_INT_TIMEOUT));
	if (ret == 0) {
		mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
			dev->int_type, command);
		return 1;
	}
	if (ret == -ERESTARTSYS) {
		mfc_err("Interrupted by a signal\n");
		return 1;
	}
	mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
		  dev->int_type, command);
	return dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET ? 1 : 0;
}

/* Reset the per-device interrupt bookkeeping before issuing a command. */
void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
{
	dev->int_cond = 0;
	dev->int_type = 0;
	dev->int_err = 0;
}

/*
 * Per-context variant of the wait above.  @interrupt selects an
 * interruptible wait; otherwise the sleep cannot be broken by signals.
 * Returns 0 on completion, 1 on timeout, signal or hardware error.
 */
int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
			      int command, int interrupt)
{
	int ret;

	if (interrupt)
		ret = wait_event_interruptible_timeout(ctx->queue,
			ctx->int_cond && (ctx->int_type == command ||
					  ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET),
			msecs_to_jiffies(MFC_INT_TIMEOUT));
	else
		ret = wait_event_timeout(ctx->queue,
			ctx->int_cond && (ctx->int_type == command ||
					  ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET),
			msecs_to_jiffies(MFC_INT_TIMEOUT));

	if (ret == 0) {
		mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
			ctx->int_type, command);
		return 1;
	}
	if (ret == -ERESTARTSYS) {
		mfc_err("Interrupted by a signal\n");
		return 1;
	}
	mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
		  ctx->int_type, command);
	return ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET ? 1 : 0;
}

/* Reset the per-context interrupt bookkeeping before issuing a command. */
void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
{
	ctx->int_cond = 0;
	ctx->int_type = 0;
	ctx->int_err = 0;
}
gpl-2.0
lyfkevin/MIUIv5_iproj_kernel
net/bridge/br_ioctl.c
7682
8865
/* * Ioctl handler * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/if_bridge.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/times.h> #include <net/net_namespace.h> #include <asm/uaccess.h> #include "br_private.h" /* called with RTNL */ static int get_bridge_ifindices(struct net *net, int *indices, int num) { struct net_device *dev; int i = 0; for_each_netdev(net, dev) { if (i >= num) break; if (dev->priv_flags & IFF_EBRIDGE) indices[i++] = dev->ifindex; } return i; } /* called with RTNL */ static void get_port_ifindices(struct net_bridge *br, int *ifindices, int num) { struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) { if (p->port_no < num) ifindices[p->port_no] = p->dev->ifindex; } } /* * Format up to a page worth of forwarding table entries * userbuf -- where to copy result * maxnum -- maximum number of entries desired * (limited to a page for sanity) * offset -- number of records to skip */ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf, unsigned long maxnum, unsigned long offset) { int num; void *buf; size_t size; /* Clamp size to PAGE_SIZE, test maxnum to avoid overflow */ if (maxnum > PAGE_SIZE/sizeof(struct __fdb_entry)) maxnum = PAGE_SIZE/sizeof(struct __fdb_entry); size = maxnum * sizeof(struct __fdb_entry); buf = kmalloc(size, GFP_USER); if (!buf) return -ENOMEM; num = br_fdb_fillbuf(br, buf, maxnum, offset); if (num > 0) { if (copy_to_user(userbuf, buf, num*sizeof(struct __fdb_entry))) num = -EFAULT; } kfree(buf); return num; } /* called with RTNL */ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) { struct net_device 
*dev; int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; dev = __dev_get_by_index(dev_net(br->dev), ifindex); if (dev == NULL) return -EINVAL; if (isadd) ret = br_add_if(br, dev); else ret = br_del_if(br, dev); return ret; } /* * Legacy ioctl's through SIOCDEVPRIVATE * This interface is deprecated because it was too difficult to * to do the translation for 32/64bit ioctl compatibility. */ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_bridge *br = netdev_priv(dev); unsigned long args[4]; if (copy_from_user(args, rq->ifr_data, sizeof(args))) return -EFAULT; switch (args[0]) { case BRCTL_ADD_IF: case BRCTL_DEL_IF: return add_del_if(br, args[1], args[0] == BRCTL_ADD_IF); case BRCTL_GET_BRIDGE_INFO: { struct __bridge_info b; memset(&b, 0, sizeof(struct __bridge_info)); rcu_read_lock(); memcpy(&b.designated_root, &br->designated_root, 8); memcpy(&b.bridge_id, &br->bridge_id, 8); b.root_path_cost = br->root_path_cost; b.max_age = jiffies_to_clock_t(br->max_age); b.hello_time = jiffies_to_clock_t(br->hello_time); b.forward_delay = br->forward_delay; b.bridge_max_age = br->bridge_max_age; b.bridge_hello_time = br->bridge_hello_time; b.bridge_forward_delay = jiffies_to_clock_t(br->bridge_forward_delay); b.topology_change = br->topology_change; b.topology_change_detected = br->topology_change_detected; b.root_port = br->root_port; b.stp_enabled = (br->stp_enabled != BR_NO_STP); b.ageing_time = jiffies_to_clock_t(br->ageing_time); b.hello_timer_value = br_timer_value(&br->hello_timer); b.tcn_timer_value = br_timer_value(&br->tcn_timer); b.topology_change_timer_value = br_timer_value(&br->topology_change_timer); b.gc_timer_value = br_timer_value(&br->gc_timer); rcu_read_unlock(); if (copy_to_user((void __user *)args[1], &b, sizeof(b))) return -EFAULT; return 0; } case BRCTL_GET_PORT_LIST: { int num, *indices; num = args[2]; if (num < 0) return -EINVAL; if (num == 0) num = 256; if (num > BR_MAX_PORTS) num = BR_MAX_PORTS; indices = 
kcalloc(num, sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; get_port_ifindices(br, indices, num); if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) num = -EFAULT; kfree(indices); return num; } case BRCTL_SET_BRIDGE_FORWARD_DELAY: if (!capable(CAP_NET_ADMIN)) return -EPERM; return br_set_forward_delay(br, args[1]); case BRCTL_SET_BRIDGE_HELLO_TIME: if (!capable(CAP_NET_ADMIN)) return -EPERM; return br_set_hello_time(br, args[1]); case BRCTL_SET_BRIDGE_MAX_AGE: if (!capable(CAP_NET_ADMIN)) return -EPERM; return br_set_max_age(br, args[1]); case BRCTL_SET_AGEING_TIME: if (!capable(CAP_NET_ADMIN)) return -EPERM; br->ageing_time = clock_t_to_jiffies(args[1]); return 0; case BRCTL_GET_PORT_INFO: { struct __port_info p; struct net_bridge_port *pt; rcu_read_lock(); if ((pt = br_get_port(br, args[2])) == NULL) { rcu_read_unlock(); return -EINVAL; } memset(&p, 0, sizeof(struct __port_info)); memcpy(&p.designated_root, &pt->designated_root, 8); memcpy(&p.designated_bridge, &pt->designated_bridge, 8); p.port_id = pt->port_id; p.designated_port = pt->designated_port; p.path_cost = pt->path_cost; p.designated_cost = pt->designated_cost; p.state = pt->state; p.top_change_ack = pt->topology_change_ack; p.config_pending = pt->config_pending; p.message_age_timer_value = br_timer_value(&pt->message_age_timer); p.forward_delay_timer_value = br_timer_value(&pt->forward_delay_timer); p.hold_timer_value = br_timer_value(&pt->hold_timer); rcu_read_unlock(); if (copy_to_user((void __user *)args[1], &p, sizeof(p))) return -EFAULT; return 0; } case BRCTL_SET_BRIDGE_STP_STATE: if (!capable(CAP_NET_ADMIN)) return -EPERM; br_stp_set_enabled(br, args[1]); return 0; case BRCTL_SET_BRIDGE_PRIORITY: if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock_bh(&br->lock); br_stp_set_bridge_priority(br, args[1]); spin_unlock_bh(&br->lock); return 0; case BRCTL_SET_PORT_PRIORITY: { struct net_bridge_port *p; int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; 
spin_lock_bh(&br->lock); if ((p = br_get_port(br, args[1])) == NULL) ret = -EINVAL; else ret = br_stp_set_port_priority(p, args[2]); spin_unlock_bh(&br->lock); return ret; } case BRCTL_SET_PATH_COST: { struct net_bridge_port *p; int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; spin_lock_bh(&br->lock); if ((p = br_get_port(br, args[1])) == NULL) ret = -EINVAL; else ret = br_stp_set_path_cost(p, args[2]); spin_unlock_bh(&br->lock); return ret; } case BRCTL_GET_FDB_ENTRIES: return get_fdb_entries(br, (void __user *)args[1], args[2], args[3]); } return -EOPNOTSUPP; } static int old_deviceless(struct net *net, void __user *uarg) { unsigned long args[3]; if (copy_from_user(args, uarg, sizeof(args))) return -EFAULT; switch (args[0]) { case BRCTL_GET_VERSION: return BRCTL_VERSION; case BRCTL_GET_BRIDGES: { int *indices; int ret = 0; if (args[2] >= 2048) return -ENOMEM; indices = kcalloc(args[2], sizeof(int), GFP_KERNEL); if (indices == NULL) return -ENOMEM; args[2] = get_bridge_ifindices(net, indices, args[2]); ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) ? 
-EFAULT : args[2]; kfree(indices); return ret; } case BRCTL_ADD_BRIDGE: case BRCTL_DEL_BRIDGE: { char buf[IFNAMSIZ]; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ)) return -EFAULT; buf[IFNAMSIZ-1] = 0; if (args[0] == BRCTL_ADD_BRIDGE) return br_add_bridge(net, buf); return br_del_bridge(net, buf); } } return -EOPNOTSUPP; } int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uarg) { switch (cmd) { case SIOCGIFBR: case SIOCSIFBR: return old_deviceless(net, uarg); case SIOCBRADDBR: case SIOCBRDELBR: { char buf[IFNAMSIZ]; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(buf, uarg, IFNAMSIZ)) return -EFAULT; buf[IFNAMSIZ-1] = 0; if (cmd == SIOCBRADDBR) return br_add_bridge(net, buf); return br_del_bridge(net, buf); } } return -EOPNOTSUPP; } int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct net_bridge *br = netdev_priv(dev); switch(cmd) { case SIOCDEVPRIVATE: return old_dev_ioctl(dev, rq, cmd); case SIOCBRADDIF: case SIOCBRDELIF: return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF); } br_debug(br, "Bridge does not support ioctl 0x%x\n", cmd); return -EOPNOTSUPP; }
gpl-2.0
olexiyt/telechips-linux
scripts/kconfig/lxdialog/inputbox.c
9218
6054
/*
 * inputbox.c -- implements the input box
 *
 * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
 * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcap@cfw.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dialog.h"

/* Result buffer shared with callers; filled on "Ok". */
char dialog_input_result[MAX_LEN + 1];

/*
 * Print the termination buttons
 */
static void print_buttons(WINDOW * dialog, int height, int width, int selected)
{
	int x = width / 2 - 11;
	int y = height - 2;

	print_button(dialog, gettext(" Ok "), y, x, selected == 0);
	print_button(dialog, gettext(" Help "), y, x + 14, selected == 1);

	wmove(dialog, y, x + 1 + 14 * selected);
	wrefresh(dialog);
}

/*
 * Display a dialog box for inputing a string
 *
 * Returns 0 for "Ok", 1 for "Help", KEY_ESC on escape.  The typed text
 * is left in dialog_input_result.  `scroll` is the count of characters
 * scrolled off the left edge of the input field; `input_x` is the cursor
 * column inside the field; `button` is -1 for the input field, 0 for
 * "Ok", 1 for "Help".
 */
int dialog_inputbox(const char *title, const char *prompt, int height, int width,
		    const char *init)
{
	int i, x, y, box_y, box_x, box_width;
	int input_x = 0, scroll = 0, key = 0, button = -1;
	char *instr = dialog_input_result;
	WINDOW *dialog;

	/* Seed the buffer from the caller-supplied initial value, if any. */
	if (!init)
		instr[0] = '\0';
	else
		strcpy(instr, init);

do_resize:
	if (getmaxy(stdscr) <= (height - 2))
		return -ERRDISPLAYTOOSMALL;
	if (getmaxx(stdscr) <= (width - 2))
		return -ERRDISPLAYTOOSMALL;

	/* center dialog box on screen */
	x = (COLS - width) / 2;
	y = (LINES - height) / 2;

	draw_shadow(stdscr, y, x, height, width);

	dialog = newwin(height, width, y, x);
	keypad(dialog, TRUE);

	draw_box(dialog, 0, 0, height, width,
		 dlg.dialog.atr, dlg.border.atr);
	wattrset(dialog, dlg.border.atr);
	mvwaddch(dialog, height - 3, 0, ACS_LTEE);
	for (i = 0; i < width - 2; i++)
		waddch(dialog, ACS_HLINE);
	wattrset(dialog, dlg.dialog.atr);
	waddch(dialog, ACS_RTEE);

	print_title(dialog, title, width);

	wattrset(dialog, dlg.dialog.atr);
	print_autowrap(dialog, prompt, width - 2, 1, 3);

	/* Draw the input field box */
	box_width = width - 6;
	getyx(dialog, y, x);
	box_y = y + 2;
	box_x = (width - box_width) / 2;
	draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2,
		 dlg.dialog.atr, dlg.border.atr);

	print_buttons(dialog, height, width, 0);

	/* Set up the initial value */
	wmove(dialog, box_y, box_x);
	wattrset(dialog, dlg.inputbox.atr);

	input_x = strlen(instr);

	/* If the initial text is wider than the field, show its tail. */
	if (input_x >= box_width) {
		scroll = input_x - box_width + 1;
		input_x = box_width - 1;
		for (i = 0; i < box_width - 1; i++)
			waddch(dialog, instr[scroll + i]);
	} else {
		waddstr(dialog, instr);
	}

	wmove(dialog, box_y, box_x + input_x);

	wrefresh(dialog);

	/* Main key loop: edit the field or navigate the buttons. */
	while (key != KEY_ESC) {
		key = wgetch(dialog);

		if (button == -1) {	/* Input box selected */
			switch (key) {
			case TAB:
			case KEY_UP:
			case KEY_DOWN:
				break;
			case KEY_LEFT:
				continue;
			case KEY_RIGHT:
				continue;
			case KEY_BACKSPACE:
			case 127:
				if (input_x || scroll) {
					wattrset(dialog, dlg.inputbox.atr);
					if (!input_x) {
						/* Cursor at the left edge: scroll back and redraw the field. */
						scroll = scroll < box_width - 1 ? 0 : scroll - (box_width - 1);
						wmove(dialog, box_y, box_x);
						for (i = 0; i < box_width; i++)
							waddch(dialog,
							       instr[scroll + input_x + i] ?
							       instr[scroll + input_x + i] : ' ');
						input_x = strlen(instr) - scroll;
					} else
						input_x--;
					instr[scroll + input_x] = '\0';
					mvwaddch(dialog, box_y, input_x + box_x, ' ');
					wmove(dialog, box_y, input_x + box_x);
					wrefresh(dialog);
				}
				continue;
			default:
				if (key < 0x100 && isprint(key)) {
					if (scroll + input_x < MAX_LEN) {
						wattrset(dialog, dlg.inputbox.atr);
						instr[scroll + input_x] = key;
						instr[scroll + input_x + 1] = '\0';
						/* At the right edge: scroll one column and redraw. */
						if (input_x == box_width - 1) {
							scroll++;
							wmove(dialog, box_y, box_x);
							for (i = 0; i < box_width - 1; i++)
								waddch(dialog, instr[scroll + i]);
						} else {
							wmove(dialog, box_y, input_x++ + box_x);
							waddch(dialog, key);
						}
						wrefresh(dialog);
					} else
						flash();	/* Alarm user about overflow */
					continue;
				}
			}
		}
		/* Button navigation / termination keys. */
		switch (key) {
		case 'O':
		case 'o':
			delwin(dialog);
			return 0;
		case 'H':
		case 'h':
			delwin(dialog);
			return 1;
		case KEY_UP:
		case KEY_LEFT:
			switch (button) {
			case -1:
				button = 1;	/* Indicates "Help" button is selected */
				print_buttons(dialog, height, width, 1);
				break;
			case 0:
				button = -1;	/* Indicates input box is selected */
				print_buttons(dialog, height, width, 0);
				wmove(dialog, box_y, box_x + input_x);
				wrefresh(dialog);
				break;
			case 1:
				button = 0;	/* Indicates "OK" button is selected */
				print_buttons(dialog, height, width, 0);
				break;
			}
			break;
		case TAB:
		case KEY_DOWN:
		case KEY_RIGHT:
			switch (button) {
			case -1:
				button = 0;	/* Indicates "OK" button is selected */
				print_buttons(dialog, height, width, 0);
				break;
			case 0:
				button = 1;	/* Indicates "Help" button is selected */
				print_buttons(dialog, height, width, 1);
				break;
			case 1:
				button = -1;	/* Indicates input box is selected */
				print_buttons(dialog, height, width, 0);
				wmove(dialog, box_y, box_x + input_x);
				wrefresh(dialog);
				break;
			}
			break;
		case ' ':
		case '\n':
			delwin(dialog);
			return (button == -1 ? 0 : button);
		case 'X':
		case 'x':
			key = KEY_ESC;
			break;
		case KEY_ESC:
			key = on_key_esc(dialog);
			break;
		case KEY_RESIZE:
			delwin(dialog);
			on_key_resize();
			goto do_resize;
		}
	}

	delwin(dialog);
	return KEY_ESC;	/* ESC pressed */
}
gpl-2.0
ShadowElite22/Xperia-Z2-Z3
fs/nilfs2/btree.c
9986
61414
/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "btnode.h"
#include "btree.h"
#include "alloc.h"
#include "dat.h"

/*
 * Allocate a lookup-path array (one entry per possible tree level) from
 * the dedicated slab cache and reset every level's bookkeeping fields.
 * Returns NULL on allocation failure.
 */
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
	struct nilfs_btree_path *path;
	int level = NILFS_BTREE_LEVEL_DATA;

	path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
	if (path == NULL)
		goto out;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
		path[level].bp_bh = NULL;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}

out:
	return path;
}

/*
 * Release the buffer head held at each level (brelse(NULL) is a no-op)
 * and return the path array to its slab cache.
 */
static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
	int level = NILFS_BTREE_LEVEL_DATA;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++)
		brelse(path[level].bp_bh);

	kmem_cache_free(nilfs_btree_path_cache, path);
}

/*
 * B-tree node operations
 */
/* Create a fresh node block in the btnode cache and hand back its bh. */
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, ptr);
	if (!bh)
		return -ENOMEM;

	/* New blocks have no assigned disk address yet. */
	set_buffer_nilfs_volatile(bh);
	*bhp = bh;
	return 0;
}

static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static void nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
	node->bn_flags = flags;
}

/* Non-zero when @node is the root node embedded in the bmap. */
static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}

static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static void nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
	node->bn_level = level;
}

/* Child-count accessors; bn_nchildren is little-endian on disk. */
static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static void nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node,
					   int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}

/* Node size equals the inode's block size. */
static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
{
	return 1 << btree->b_inode->i_blkbits;
}

static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
{
	return btree->b_nchildren_per_block;
}

/*
 * Key array starts right after the node header; non-root nodes carry
 * extra pad bytes before it.
 */
static __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

/* Pointer array follows the key array (ncmax key slots). */
static __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node,
		       int ncmax)
{
	return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
}

static __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
	return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
}

static void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
}

static __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
			 int ncmax)
{
	return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
}

static void
nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
			 int ncmax)
{
	*(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
}

/* Initialize a node header and bulk-load its key/pointer arrays. */
static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
				  int level, int nchildren, int ncmax,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(node, flags);
	nilfs_btree_node_set_level(node, level);
	nilfs_btree_node_set_nchildren(node, nchildren);

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = cpu_to_le64(keys[i]);
		dptrs[i] = cpu_to_le64(ptrs[i]);
	}
}

/* Assume the buffer heads corresponding to left and right are locked.
*/ static void nilfs_btree_node_move_left(struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys)); memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs)); memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys)); memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs)); lnchildren += n; rnchildren -= n; nilfs_btree_node_set_nchildren(left, lnchildren); nilfs_btree_node_set_nchildren(right, rnchildren); } /* Assume that the buffer heads corresponding to left and right are locked. */ static void nilfs_btree_node_move_right(struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys)); memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs)); memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys)); memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs)); lnchildren -= n; rnchildren += n; nilfs_btree_node_set_nchildren(left, lnchildren); nilfs_btree_node_set_nchildren(right, rnchildren); } /* Assume that the buffer head corresponding to node is locked. 
*/
/*
 * Insert the (@key, @ptr) pair at @index of @node, shifting later
 * entries right.  The caller guarantees there is room (nchildren <
 * capacity) and holds the buffer lock.
 */
static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
				    __u64 key, __u64 ptr, int ncmax)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = cpu_to_le64(key);
	dptrs[index] = cpu_to_le64(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(node, nchildren);
}

/* Assume that the buffer head corresponding to node is locked. */
/*
 * Remove the entry at @index of @node, optionally returning the
 * removed key/ptr through @keyp/@ptrp, and close the gap.
 */
static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
				    __u64 *keyp, __u64 *ptrp, int ncmax)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	key = le64_to_cpu(dkeys[index]);
	ptr = le64_to_cpu(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(node, nchildren);
}

/*
 * Binary-search @node for @key.  Returns nonzero on an exact match.
 * *@indexp receives the slot to descend into: for interior nodes the
 * index is stepped back when the probe overshot; for leaf-level nodes
 * it is stepped forward so it names the insertion point on a miss.
 */
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
		if (s > 0 && index > 0)
			index--;
	} else if (s < 0)
		index++;

 out:
	*indexp = index;

	return s == 0;
}

/**
 * nilfs_btree_node_broken - verify consistency of btree node
 * @node: btree node block to be examined
 * @size: node size (in bytes)
 * @blocknr: block number
 *
 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
				   size_t size, sector_t blocknr)
{
	int level, flags, nchildren;
	int ret = 0;

	level = nilfs_btree_node_get_level(node);
	flags = nilfs_btree_node_get_flags(node);
	nchildren = nilfs_btree_node_get_nchildren(node);

	/* a block-resident node must never carry the ROOT flag */
	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
		     level >= NILFS_BTREE_LEVEL_MAX ||
		     (flags & NILFS_BTREE_NODE_ROOT) ||
		     nchildren < 0 ||
		     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
		printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
		       "level = %d, flags = 0x%x, nchildren = %d\n",
		       (unsigned long long)blocknr, level, flags, nchildren);
		ret = 1;
	}
	return ret;
}

/*
 * Validate a node block once per buffer: a clean check sets the
 * "nilfs_checked" buffer flag so later calls short-circuit.
 * Returns 1 if the node is broken, 0 otherwise.
 */
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
	int ret;

	if (buffer_nilfs_checked(bh))
		return 0;

	ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
				      bh->b_size, bh->b_blocknr);
	if (likely(!ret))
		set_buffer_nilfs_checked(bh);
	return ret;
}

/* Root node lives inline in the bmap union, not in a block. */
static struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_bmap *btree)
{
	return (struct nilfs_btree_node *)btree->b_u.u_data;
}

/* Non-root node at @level, taken from the buffer cached on the path. */
static struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

/* Sibling node at @level, from the sibling buffer on the path. */
static struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

/* Tree height = root level + 1. */
static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}

/*
 * Return the node at @level on @path — the inline root for the top
 * level, otherwise the cached block node — and store that node's
 * child capacity in *@ncmaxp.
 */
static struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_bmap *btree,
		     const struct nilfs_btree_path *path,
		     int level, int *ncmaxp)
{
	struct nilfs_btree_node *node;

	if (level == nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_root(btree);
		*ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
	} else {
		node = nilfs_btree_get_nonroot_node(path, level);
		*ncmaxp = nilfs_btree_nchildren_per_block(btree);
	}
	return node;
}

/* Report (and dump stack on) a node whose stored level disagrees
 * with the level we descended to — indicates on-disk corruption. */
static int nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
		dump_stack();
		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
		       nilfs_btree_node_get_level(node), level);
		return 1;
	}
	return 0;
}

struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};

/*
 * Read the node block at @ptr, optionally issuing read-ahead for up
 * to @ra->max_ra_blocks sibling blocks named by the parent node.
 * The target read is submitted first; -EEXIST from the submit path
 * means the buffer is already up to date and only needs re-checking.
 * Returns 0 with *@bhp set, or -EIO/-EINVAL/other negative errno.
 */
static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				   struct buffer_head **bhp,
				   const struct nilfs_btree_readahead_info *ra)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh, *ra_bh;
	sector_t submit_ptr = 0;
	int ret;

	ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
	if (ret) {
		if (ret != -EEXIST)
			return ret;
		goto out_check;
	}

	if (ra) {
		int i, n;
		__u64 ptr2;

		/* read ahead sibling nodes */
		for (n = ra->max_ra_blocks, i = ra->index + 1;
		     n > 0 && i < ra->ncmax; n--, i++) {
			ptr2 = nilfs_btree_node_get_ptr(ra->node, i,
							ra->ncmax);

			ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
							&ra_bh, &submit_ptr);
			if (likely(!ret || ret == -EEXIST))
				brelse(ra_bh);
			else if (ret != -EBUSY)
				break;
			/* stop issuing read-ahead once the target I/O
			 * has already completed */
			if (!buffer_locked(bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(bh);

 out_no_wait:
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}

 out_check:
	if (nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		brelse(bh);
		return -EINVAL;
	}

	*bhp = bh;
	return 0;
}

/* Read a node block without sibling read-ahead. */
static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
}

static int
nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
		      struct nilfs_btree_path *path,
		      __u64 key, __u64 *ptrp, int minlevel, int readahead)
{
	/*
	 * Descend from the root to @minlevel looking for @key,
	 * recording the visited node buffer and child index at each
	 * level of @path.  With @readahead set, sibling read-ahead is
	 * enabled for the lowest node level.  Returns 0 on a hit
	 * (ptr stored in *@ptrp if non-NULL), -ENOENT on a miss,
	 * or a negative errno from block I/O / corruption checks.
	 */
	struct nilfs_btree_node *node;
	struct nilfs_btree_readahead_info p, *ra;
	__u64 ptr;
	int level, index, found, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(node);
	if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
		return -ENOENT;

	found = nilfs_btree_node_lookup(node, key, &index);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	ncmax = nilfs_btree_nchildren_per_block(btree);

	while (--level >= minlevel) {
		ra = NULL;
		if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
			p.node = nilfs_btree_get_node(btree, path, level + 1,
						      &p.ncmax);
			p.index = index;
			p.max_ra_blocks = 7;
			ra = &p;
		}
		ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
					      ra);
		if (ret < 0)
			return ret;

		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		if (!found)
			found = nilfs_btree_node_lookup(node, key, &index);
		else
			index = 0;	/* already found: follow leftmost */
		if (index < ncmax) {
			ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		} else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}

/*
 * Walk the rightmost spine of the tree to the last (largest-key)
 * entry, filling @path on the way.  Returns -ENOENT if the tree is
 * empty; otherwise stores the last key/ptr via @keyp/@ptrp.
 */
static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(node);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;
	ncmax = nilfs_btree_nchildren_per_block(btree);

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		index = nilfs_btree_node_get_nchildren(node) - 1;
		ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}

/*
 * Public lookup entry: allocates a temporary path, resolves @key at
 * @level, and frees the path again.
 */
static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree_path *path;
	int ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);

	nilfs_btree_free_path(path);

	return ret;
}

/*
 * Look up @key and count how many of the following blocks are both
 * key-contiguous and ptr-contiguous (after DAT translation when
 * virtual block numbers are in use), up to @maxblocks.  Returns the
 * contiguous count (>= 1) with the first block address in *@ptrp, or
 * a negative errno.
 */
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
				     __u64 key, __u64 *ptrp,
				     unsigned maxblocks)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel, ncmax;
	struct nilfs_btree_readahead_info p;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(btree)) {
		/* translate virtual block number to disk block number */
		dat = nilfs_bmap_get_dat(btree);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level, &ncmax);
	index = path[level].bp_index + 1;
	for (;;) {
		/* scan forward within the current leaf-level node */
		while (index < nilfs_btree_node_get_nchildren(node)) {
			if (nilfs_btree_node_get_key(node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2,
							  &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
			continue;
		}
		if (level == maxlevel)
			break;

		/* look-up right
sibling node */
		p.node = nilfs_btree_get_node(btree, path, level + 1,
					      &p.ncmax);
		p.index = path[level + 1].bp_index + 1;
		p.max_ra_blocks = 7;
		if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
		    nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
		path[level + 1].bp_index = p.index;

		/* swap the cached node buffer for the right sibling */
		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;

		ret = __nilfs_btree_get_block(btree, ptr2,
					      &path[level].bp_bh, &p);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(path, level);
		ncmax = nilfs_btree_nchildren_per_block(btree);
		index = 0;
		path[level].bp_index = index;
	}
 end:
	*ptrp = ptr;
	ret = cnt;
 out:
	nilfs_btree_free_path(path);
	return ret;
}

/*
 * After the leftmost entry of a node changed, copy the new smallest
 * key into the parent's separator slot, propagating upward as long as
 * the updated entry is itself at index 0, up to (and including) the
 * root.
 */
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			nilfs_btree_node_set_key(
				nilfs_btree_get_nonroot_node(path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				mark_buffer_dirty(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}

/*
 * Plain insertion at @level: the node has room, so insert the pair,
 * dirty the buffer, and promote the key if the node's minimum changed.
 * Used as a bp_op callback.
 */
static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(
							node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}

/*
 * Insertion via rebalancing into a non-full left sibling: move
 * entries left so the current node gains room, then insert.  If the
 * insert point itself moved into the sibling, the path is redirected
 * there (bp_bh swapped, parent index decremented).
 */
static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	/* n entries balance the two nodes (+1 accounts for the
	 * entry about to be inserted) */
	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}

/*
 * Mirror of carry_left: rebalance into a non-full right sibling,
 * then insert.  The parent index is temporarily bumped so
 * promote_key updates the right sibling's separator.
 */
static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -=
			nilfs_btree_node_get_nchildren(node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}

/*
 * Split the full node at @level into itself and a freshly allocated
 * right sibling, insert the new entry on whichever side the insert
 * point falls, and hand the new sibling's (key, ptr) pair back via
 * @keyp/@ptrp for insertion into the parent.
 */
static void nilfs_btree_split(struct nilfs_bmap *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	/* NOTE(review): newkey/newptr are computed but the values
	 * propagated to the caller are re-read below — presumably
	 * kept for symmetry; verify against upstream history. */
	newkey = nilfs_btree_node_get_key(right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		path[level].bp_index -=
			nilfs_btree_node_get_nchildren(node);
		nilfs_btree_node_insert(right, path[level].bp_index,
					*keyp, *ptrp, ncblk);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}

static void
nilfs_btree_grow(struct nilfs_bmap *btree, struct nilfs_btree_path *path,
		 int level, __u64 *keyp, __u64 *ptrp)
{
	/*
	 * Grow the tree by one level: move the root's children into a
	 * newly allocated child block, raise the root's level, insert
	 * the pending entry, and return the new child's (key, ptr)
	 * pair for insertion into the (now one-level-higher) root.
	 */
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(root);

	nilfs_btree_node_move_right(root, child, n,
				    NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
	nilfs_btree_node_set_level(root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	*keyp = nilfs_btree_node_get_key(child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}

/*
 * Heuristic block-placement hint: pick a ptr near the data being
 * inserted — the left neighbour at leaf level if one exists,
 * otherwise the parent node's own block.
 */
static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *node;
	int level, ncmax;

	if (path == NULL)
		return NILFS_BMAP_INVALID_PTR;

	/* left sibling */
	level = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[level].bp_index > 0) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node,
						path[level].bp_index - 1,
						ncmax);
	}

	/* parent */
	level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
	if (level <= nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node, path[level].bp_index,
						ncmax);
	}

	return NILFS_BMAP_INVALID_PTR;
}

/*
 * Choose a target virtual block number for a new allocation: prefer
 * sequential-access continuation, then a nearby block, then fall
 * back to the block group allocator.
 */
static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(btree, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;
	else {
		ptr = nilfs_btree_find_near(btree, path);
		if (ptr != NILFS_BMAP_INVALID_PTR)
			/* near */
			return ptr;
	}
	/* block group */
	return nilfs_bmap_find_target_in_group(btree);
}

/*
 * Plan an insertion: walk up from the leaf level deciding, per level,
 * which operation restores invariants (plain insert, carry into a
 * sibling, split, or grow the root), pre-allocating any new blocks
 * needed.  The chosen callbacks are stored in path[level].bp_op and
 * executed later by nilfs_btree_commit_insert().  On error every
 * reservation made so far is rolled back via the goto chain.
 * *@levelp receives the highest level that needs an operation;
 * @stats->bs_nblocks counts blocks that will be added.
 */
static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ncmax, ncblk, ret;
	struct inode *dat = NULL;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;
	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(btree)) {
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq,
					   dat);
	if (ret < 0)
		goto err_out_data;

	ncblk = nilfs_btree_nchildren_per_block(btree);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_node_get_nchildren(node) < ncblk) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1,
					      &ncmax);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(parent,
							  pindex - 1, ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* right sibling */
		if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(parent,
							  pindex + 1, ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* split */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree,
						   &path[level].bp_newreq,
						   dat);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		sib = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(node) <
	    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow */
	path[level].bp_newreq.bpr_ptr =
		path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq,
					   dat);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree,
					path[level].bp_newreq.bpr_ptr, &bh);
	if (ret < 0)
		goto err_out_curr_node;

	nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, ncblk, NULL, NULL);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_child_node:
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq,
					   dat);
	}

	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}

/*
 * Execute the operations planned by nilfs_btree_prepare_insert(),
 * committing each level's pre-allocated ptr first.
 *
 * NOTE(review): on entry @ptr carries the data buffer_head pointer
 * cast to __u64 (it is cast back to set the volatile flag), and is
 * then replaced with the newly allocated data-block ptr — confirm
 * against the bmap caller's calling convention.
 */
static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	struct inode *dat = NULL;
	int level;

	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(btree)) {
		nilfs_bmap_set_target_v(btree, key, ptr);
		dat = nilfs_bmap_get_dat(btree);
	}

	for
(level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		/* commit the ptr reserved one level below, then run
		 * the per-level operation chosen during prepare */
		nilfs_bmap_commit_alloc_ptr(btree,
					    &path[level - 1].bp_newreq, dat);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}

/*
 * Insert (@key, @ptr) into the btree.  Fails with -EEXIST if the key
 * is already present; otherwise runs the prepare/commit insertion
 * protocol and accounts the added blocks to the inode.
 */
static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key,
			      __u64 ptr)
{
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (ret != -ENOENT) {
		if (ret == 0)
			ret = -EEXIST;
		goto out;
	}

	ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr,
					 &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, level, key, ptr);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);

 out:
	nilfs_btree_free_path(path);
	return ret;
}

/*
 * Plain deletion at @level: remove the entry, dirty the buffer, and
 * promote the new minimum key if the first entry was removed.
 * Used as a bp_op callback.
 */
static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}

/*
 * Delete, then refill the underflowed node by borrowing entries from
 * its left sibling so both end up balanced; the parent separator is
 * updated via promote_key.
 */
static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = (nchildren + lnchildren) / 2 - nchildren;

	nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += n;
}

/*
 * Mirror of borrow_left: delete, then borrow from the right sibling
 * and update its parent separator (parent index bumped temporarily
 * so promote_key hits the right slot).
 */
static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = (nchildren + rnchildren) / 2 - nchildren;

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	brelse(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
}

/*
 * Delete, then merge the remaining entries of the current node into
 * its left sibling and free the current node block; the path is
 * redirected to the sibling.
 */
static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(node);

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;
	path[level].bp_index += nilfs_btree_node_get_nchildren(left);
}

/*
 * Delete, then merge the right sibling's entries into the current
 * node and free the sibling block; the parent index is advanced so
 * the parent-level deletion removes the sibling's slot.
 */
static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
				     struct nilfs_btree_path *path,
				     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(right);

	nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);

	nilfs_btnode_delete(path[level].bp_sib_bh);
	path[level].bp_sib_bh = NULL;
	path[level + 1].bp_index++;
}

/*
 * Shrink the tree by one level: delete, then pull the root's only
 * child's entries up into the root, lower the root's level, and free
 * the child block.
 */
static void nilfs_btree_shrink(struct nilfs_bmap *btree,
			       struct nilfs_btree_path *path,
			       int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	nilfs_btree_do_delete(btree, path, level, keyp, ptrp);

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_nonroot_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	nilfs_btree_node_delete(root, 0, NULL, NULL,
				NILFS_BTREE_ROOT_NCHILDREN_MAX);
	nilfs_btree_node_set_level(root, level);
	n = nilfs_btree_node_get_nchildren(child);
	nilfs_btree_node_move_left(root, child, n,
				   NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);

	nilfs_btnode_delete(path[level].bp_bh);
	path[level].bp_bh = NULL;
}

/* No-op bp_op placeholder (used above the shrink level). */
static void nilfs_btree_nop(struct nilfs_bmap *btree,
			    struct nilfs_btree_path *path,
			    int level, __u64 *keyp, __u64 *ptrp)
{
}

/*
 * Plan a deletion (mirror of prepare_insert): walk up from the leaf
 * choosing, per level, the operation that restores invariants —
 * plain delete, borrow from a sibling, concatenate with a sibling,
 * or shrink the root — reserving end-of-life for each ptr removed.
 */
static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp,
				      struct nilfs_bmap_stats *stats,
				      struct inode *dat)
{
	struct buffer_head *bh;
	struct
nilfs_btree_node *node, *parent, *sib; __u64 sibptr; int pindex, dindex, level, ncmin, ncmax, ncblk, ret; ret = 0; stats->bs_nblocks = 0; ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); ncblk = nilfs_btree_nchildren_per_block(btree); for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(node, dindex, ncblk); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; if (nilfs_btree_node_get_nchildren(node) > ncmin) { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; goto out; } parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; dindex = pindex; if (pindex > 0) { /* left sibling */ sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; sib = (struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_left; stats->bs_nblocks++; goto out; } else { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_concat_left; stats->bs_nblocks++; /* continue; */ } } else if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) { /* right sibling */ sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1, ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; sib = (struct nilfs_btree_node *)bh->b_data; if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_right; stats->bs_nblocks++; goto out; } else { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_concat_right; stats->bs_nblocks++; /* * When merging right sibling node * into the current node, pointer to * the right sibling 
node must be * terminated instead. The adjustment * below is required for that. */ dindex = pindex + 1; /* continue; */ } } else { /* no siblings */ /* the only child of the root node */ WARN_ON(level != nilfs_btree_height(btree) - 2); if (nilfs_btree_node_get_nchildren(node) - 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) { path[level].bp_op = nilfs_btree_shrink; stats->bs_nblocks += 2; level++; path[level].bp_op = nilfs_btree_nop; goto shrink_root_child; } else { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; goto out; } } } /* child of the root node is deleted */ path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; shrink_root_child: node = nilfs_btree_get_root(btree); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(node, dindex, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; /* success */ out: *levelp = level; return ret; /* error */ err_out_curr_node: nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); err_out_child_node: for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { brelse(path[level].bp_sib_bh); nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); } *levelp = level; stats->bs_nblocks = 0; return ret; } static void nilfs_btree_commit_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, struct inode *dat) { int level; for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat); path[level].bp_op(btree, path, level, NULL, NULL); } if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key) { struct nilfs_btree_path *path; struct nilfs_bmap_stats stats; struct inode *dat; int level, ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, NULL, NILFS_BTREE_LEVEL_NODE_MIN, 0); if (ret < 
0) goto out; dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat); if (ret < 0) goto out; nilfs_btree_commit_delete(btree, path, level, dat); nilfs_inode_sub_blocks(btree->b_inode, stats.bs_nblocks); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp) { struct nilfs_btree_path *path; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup_last(btree, path, keyp, NULL); nilfs_btree_free_path(path); return ret; } static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) { struct buffer_head *bh; struct nilfs_btree_node *root, *node; __u64 maxkey, nextmaxkey; __u64 ptr; int nchildren, ret; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; break; case 3: nchildren = nilfs_btree_node_get_nchildren(root); if (nchildren > 1) return 0; ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; break; default: return 0; } nchildren = nilfs_btree_node_get_nchildren(node); maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nextmaxkey = (nchildren > 1) ? 
nilfs_btree_node_get_key(node, nchildren - 2) : 0; if (bh != NULL) brelse(bh); return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW); } static int nilfs_btree_gather_data(struct nilfs_bmap *btree, __u64 *keys, __u64 *ptrs, int nitems) { struct buffer_head *bh; struct nilfs_btree_node *node, *root; __le64 *dkeys; __le64 *dptrs; __u64 ptr; int nchildren, ncmax, i, ret; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX; break; case 3: nchildren = nilfs_btree_node_get_nchildren(root); WARN_ON(nchildren > 1); ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; ncmax = nilfs_btree_nchildren_per_block(btree); break; default: node = NULL; return -EINVAL; } nchildren = nilfs_btree_node_get_nchildren(node); if (nchildren < nitems) nitems = nchildren; dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, ncmax); for (i = 0; i < nitems; i++) { keys[i] = le64_to_cpu(dkeys[i]); ptrs[i] = le64_to_cpu(dptrs[i]); } if (bh != NULL) brelse(bh); return nitems; } static int nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats) { struct buffer_head *bh; struct inode *dat = NULL; int ret; stats->bs_nblocks = 0; /* for data */ /* cannot find near ptr */ if (NILFS_BMAP_USE_VBN(btree)) { dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key); dat = nilfs_bmap_get_dat(btree); } ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); if (ret < 0) return ret; *bhp = NULL; stats->bs_nblocks++; if (nreq != NULL) { nreq->bpr_ptr = dreq->bpr_ptr + 1; ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); if (ret < 0) goto err_out_dreq; ret = nilfs_btree_get_new_block(btree, 
nreq->bpr_ptr, &bh); if (ret < 0) goto err_out_nreq; *bhp = bh; stats->bs_nblocks++; } /* success */ return 0; /* error */ err_out_nreq: nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); err_out_dreq: nilfs_bmap_abort_alloc_ptr(btree, dreq, dat); stats->bs_nblocks = 0; return ret; } static void nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh) { struct nilfs_btree_node *node; struct inode *dat; __u64 tmpptr; int ncblk; /* free resources */ if (btree->b_ops->bop_clear != NULL) btree->b_ops->bop_clear(btree); /* ptr must be a pointer to a buffer head. */ set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); /* convert and insert */ dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; nilfs_btree_init(btree); if (nreq != NULL) { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); /* create child node at level 1 */ node = (struct nilfs_btree_node *)bh->b_data; ncblk = nilfs_btree_nchildren_per_block(btree); nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); if (!buffer_dirty(bh)) mark_buffer_dirty(bh); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); brelse(bh); /* create root node at level 2 */ node = nilfs_btree_get_root(btree); tmpptr = nreq->bpr_ptr; nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1, NILFS_BTREE_ROOT_NCHILDREN_MAX, &keys[0], &tmpptr); } else { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); /* create root node at level 1 */ node = nilfs_btree_get_root(btree); nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n, NILFS_BTREE_ROOT_NCHILDREN_MAX, keys, ptrs); nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, NILFS_BTREE_ROOT_NCHILDREN_MAX); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } if (NILFS_BMAP_USE_VBN(btree)) 
nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr); } /** * nilfs_btree_convert_and_insert - * @bmap: * @key: * @ptr: * @keys: * @ptrs: * @n: */ int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n) { struct buffer_head *bh; union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; struct nilfs_bmap_stats stats; int ret; if (n + 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX) { di = &dreq; ni = NULL; } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX( 1 << btree->b_inode->i_blkbits)) { di = &dreq; ni = &nreq; } else { di = NULL; ni = NULL; BUG(); } ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh, &stats); if (ret < 0) return ret; nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n, di, ni, bh); nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks); return 0; } static int nilfs_btree_propagate_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) { while ((++level < nilfs_btree_height(btree) - 1) && !buffer_dirty(path[level].bp_bh)) mark_buffer_dirty(path[level].bp_bh); return 0; } static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { struct nilfs_btree_node *parent; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); if (ret < 0) return ret; if (buffer_nilfs_node(path[level].bp_bh)) { path[level].bp_ctxt.oldkey = path[level].bp_oldreq.bpr_ptr; path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; path[level].bp_ctxt.bh = path[level].bp_bh; ret = nilfs_btnode_prepare_change_key( &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); if (ret < 0) { 
nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); return ret; } } return 0; } static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { struct nilfs_btree_node *parent; int ncmax; nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req, btree->b_ptr_type == NILFS_BMAP_PTR_VS); if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btnode_commit_change_key( &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); path[level].bp_bh = path[level].bp_ctxt.bh; } set_buffer_nilfs_volatile(path[level].bp_bh); parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, path[level].bp_newreq.bpr_ptr, ncmax); } static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { nilfs_dat_abort_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); if (buffer_nilfs_node(path[level].bp_bh)) nilfs_btnode_abort_change_key( &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); } static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int *maxlevelp, struct inode *dat) { int level, ret; level = minlevel; if (!buffer_nilfs_volatile(path[level].bp_bh)) { ret = nilfs_btree_prepare_update_v(btree, path, level, dat); if (ret < 0) return ret; } while ((++level < nilfs_btree_height(btree) - 1) && !buffer_dirty(path[level].bp_bh)) { WARN_ON(buffer_nilfs_volatile(path[level].bp_bh)); ret = nilfs_btree_prepare_update_v(btree, path, level, dat); if (ret < 0) goto out; } /* success */ *maxlevelp = level - 1; return 0; /* error */ out: while (--level > minlevel) nilfs_btree_abort_update_v(btree, path, level, dat); if (!buffer_nilfs_volatile(path[level].bp_bh)) nilfs_btree_abort_update_v(btree, path, level, dat); return ret; } static void 
nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int maxlevel, struct buffer_head *bh, struct inode *dat) { int level; if (!buffer_nilfs_volatile(path[minlevel].bp_bh)) nilfs_btree_commit_update_v(btree, path, minlevel, dat); for (level = minlevel + 1; level <= maxlevel; level++) nilfs_btree_commit_update_v(btree, path, level, dat); } static int nilfs_btree_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) { int maxlevel = 0, ret; struct nilfs_btree_node *parent; struct inode *dat = nilfs_bmap_get_dat(btree); __u64 ptr; int ncmax; get_bh(bh); path[level].bp_bh = bh; ret = nilfs_btree_prepare_propagate_v(btree, path, level, &maxlevel, dat); if (ret < 0) goto out; if (buffer_nilfs_volatile(path[level].bp_bh)) { parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); ret = nilfs_dat_mark_dirty(dat, ptr); if (ret < 0) goto out; } nilfs_btree_commit_propagate_v(btree, path, level, maxlevel, bh, dat); out: brelse(path[level].bp_bh); path[level].bp_bh = NULL; return ret; } static int nilfs_btree_propagate(struct nilfs_bmap *btree, struct buffer_head *bh) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; int level, ret; WARN_ON(!buffer_dirty(bh)); path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; if (buffer_nilfs_node(bh)) { node = (struct nilfs_btree_node *)bh->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { key = nilfs_bmap_data_get_key(btree, bh); level = NILFS_BTREE_LEVEL_DATA; } ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { if (unlikely(ret == -ENOENT)) printk(KERN_CRIT "%s: key = %llu, level == %d\n", __func__, (unsigned long long)key, level); goto out; } ret = NILFS_BMAP_USE_VBN(btree) ? 
nilfs_btree_propagate_v(btree, path, level, bh) : nilfs_btree_propagate_p(btree, path, level, bh); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree, struct buffer_head *bh) { return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr); } static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, struct list_head *lists, struct buffer_head *bh) { struct list_head *head; struct buffer_head *cbh; struct nilfs_btree_node *node, *cnode; __u64 key, ckey; int level; get_bh(bh); node = (struct nilfs_btree_node *)bh->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); if (level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX) { dump_stack(); printk(KERN_WARNING "%s: invalid btree level: %d (key=%llu, ino=%lu, " "blocknr=%llu)\n", __func__, level, (unsigned long long)key, NILFS_BMAP_I(btree)->vfs_inode.i_ino, (unsigned long long)bh->b_blocknr); return; } list_for_each(head, &lists[level]) { cbh = list_entry(head, struct buffer_head, b_assoc_buffers); cnode = (struct nilfs_btree_node *)cbh->b_data; ckey = nilfs_btree_node_get_key(cnode, 0); if (key < ckey) break; } list_add_tail(&bh->b_assoc_buffers, head); } static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree, struct list_head *listp) { struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache; struct list_head lists[NILFS_BTREE_LEVEL_MAX]; struct pagevec pvec; struct buffer_head *bh, *head; pgoff_t index = 0; int level, i; for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < NILFS_BTREE_LEVEL_MAX; level++) INIT_LIST_HEAD(&lists[level]); pagevec_init(&pvec, 0); while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { bh = head = page_buffers(pvec.pages[i]); do { if (buffer_dirty(bh)) nilfs_btree_add_dirty_buffer(btree, lists, bh); } while ((bh = bh->b_this_page) != head); } 
pagevec_release(&pvec); cond_resched(); } for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < NILFS_BTREE_LEVEL_MAX; level++) list_splice_tail(&lists[level], listp); } static int nilfs_btree_assign_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node *parent; __u64 key; __u64 ptr; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); if (buffer_nilfs_node(*bh)) { path[level].bp_ctxt.oldkey = ptr; path[level].bp_ctxt.newkey = blocknr; path[level].bp_ctxt.bh = *bh; ret = nilfs_btnode_prepare_change_key( &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); if (ret < 0) return ret; nilfs_btnode_commit_change_key( &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); *bh = path[level].bp_ctxt.bh; } nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr, ncmax); key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ binfo->bi_dat.bi_blkoff = cpu_to_le64(key); binfo->bi_dat.bi_level = level; return 0; } static int nilfs_btree_assign_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node *parent; struct inode *dat = nilfs_bmap_get_dat(btree); __u64 key; __u64 ptr; union nilfs_bmap_ptr_req req; int ncmax, ret; parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, ncmax); req.bpr_ptr = ptr; ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (ret < 0) return ret; nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr); binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 0; } static int nilfs_btree_assign(struct 
nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; int level, ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; if (buffer_nilfs_node(*bh)) { node = (struct nilfs_btree_node *)(*bh)->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { key = nilfs_bmap_data_get_key(btree, *bh); level = NILFS_BTREE_LEVEL_DATA; } ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } ret = NILFS_BMAP_USE_VBN(btree) ? nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) : nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo); out: nilfs_btree_free_path(path); return ret; } static int nilfs_btree_assign_gc(struct nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { struct nilfs_btree_node *node; __u64 key; int ret; ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr, blocknr); if (ret < 0) return ret; if (buffer_nilfs_node(*bh)) { node = (struct nilfs_btree_node *)(*bh)->b_data; key = nilfs_btree_node_get_key(node, 0); } else key = nilfs_bmap_data_get_key(btree, *bh); /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr); binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 0; } static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level) { struct buffer_head *bh; struct nilfs_btree_path *path; __u64 ptr; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; } if (!buffer_dirty(bh)) mark_buffer_dirty(bh); brelse(bh); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); out: nilfs_btree_free_path(path); return 
ret; } static const struct nilfs_bmap_operations nilfs_btree_ops = { .bop_lookup = nilfs_btree_lookup, .bop_lookup_contig = nilfs_btree_lookup_contig, .bop_insert = nilfs_btree_insert, .bop_delete = nilfs_btree_delete, .bop_clear = NULL, .bop_propagate = nilfs_btree_propagate, .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers, .bop_assign = nilfs_btree_assign, .bop_mark = nilfs_btree_mark, .bop_last_key = nilfs_btree_last_key, .bop_check_insert = NULL, .bop_check_delete = nilfs_btree_check_delete, .bop_gather_data = nilfs_btree_gather_data, }; static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { .bop_lookup = NULL, .bop_lookup_contig = NULL, .bop_insert = NULL, .bop_delete = NULL, .bop_clear = NULL, .bop_propagate = nilfs_btree_propagate_gc, .bop_lookup_dirty_buffers = nilfs_btree_lookup_dirty_buffers, .bop_assign = nilfs_btree_assign_gc, .bop_mark = NULL, .bop_last_key = NULL, .bop_check_insert = NULL, .bop_check_delete = NULL, .bop_gather_data = NULL, }; int nilfs_btree_init(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops; bmap->b_nchildren_per_block = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); return 0; } void nilfs_btree_init_gc(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops_gc; bmap->b_nchildren_per_block = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); }
gpl-2.0
UnknownzD/I9103_ICS_Kernel
drivers/auxdisplay/ks0108.c
13058
4622
/* * Filename: ks0108.c * Version: 0.1.0 * Description: ks0108 LCD Controller driver * License: GPLv2 * Depends: parport * * Author: Copyright (C) Miguel Ojeda Sandonis * Date: 2006-10-31 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/parport.h> #include <linux/uaccess.h> #include <linux/ks0108.h> #define KS0108_NAME "ks0108" /* * Module Parameters */ static unsigned int ks0108_port = CONFIG_KS0108_PORT; module_param(ks0108_port, uint, S_IRUGO); MODULE_PARM_DESC(ks0108_port, "Parallel port where the LCD is connected"); static unsigned int ks0108_delay = CONFIG_KS0108_DELAY; module_param(ks0108_delay, uint, S_IRUGO); MODULE_PARM_DESC(ks0108_delay, "Delay between each control writing (microseconds)"); /* * Device */ static struct parport *ks0108_parport; static struct pardevice *ks0108_pardevice; /* * ks0108 Exported Commands (don't lock) * * You _should_ lock in the top driver: This functions _should not_ * get race conditions in any way. Locking for each byte here would be * so slow and useless. * * There are not bit definitions because they are not flags, * just arbitrary combinations defined by the documentation for each * function in the ks0108 LCD controller. 
If you want to know what means * a specific combination, look at the function's name. * * The ks0108_writecontrol bits need to be reverted ^(0,1,3) because * the parallel port also revert them using a "not" logic gate. */ #define bit(n) (((unsigned char)1)<<(n)) void ks0108_writedata(unsigned char byte) { parport_write_data(ks0108_parport, byte); } void ks0108_writecontrol(unsigned char byte) { udelay(ks0108_delay); parport_write_control(ks0108_parport, byte ^ (bit(0) | bit(1) | bit(3))); } void ks0108_displaystate(unsigned char state) { ks0108_writedata((state ? bit(0) : 0) | bit(1) | bit(2) | bit(3) | bit(4) | bit(5)); } void ks0108_startline(unsigned char startline) { ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7)); } void ks0108_address(unsigned char address) { ks0108_writedata(min(address,(unsigned char)63) | bit(6)); } void ks0108_page(unsigned char page) { ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7)); } EXPORT_SYMBOL_GPL(ks0108_writedata); EXPORT_SYMBOL_GPL(ks0108_writecontrol); EXPORT_SYMBOL_GPL(ks0108_displaystate); EXPORT_SYMBOL_GPL(ks0108_startline); EXPORT_SYMBOL_GPL(ks0108_address); EXPORT_SYMBOL_GPL(ks0108_page); /* * Is the module inited? 
*/ static unsigned char ks0108_inited; unsigned char ks0108_isinited(void) { return ks0108_inited; } EXPORT_SYMBOL_GPL(ks0108_isinited); /* * Module Init & Exit */ static int __init ks0108_init(void) { int result; int ret = -EINVAL; ks0108_parport = parport_find_base(ks0108_port); if (ks0108_parport == NULL) { printk(KERN_ERR KS0108_NAME ": ERROR: " "parport didn't find %i port\n", ks0108_port); goto none; } ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME, NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); if (ks0108_pardevice == NULL) { printk(KERN_ERR KS0108_NAME ": ERROR: " "parport didn't register new device\n"); goto none; } result = parport_claim(ks0108_pardevice); if (result != 0) { printk(KERN_ERR KS0108_NAME ": ERROR: " "can't claim %i parport, maybe in use\n", ks0108_port); ret = result; goto registered; } ks0108_inited = 1; return 0; registered: parport_unregister_device(ks0108_pardevice); none: return ret; } static void __exit ks0108_exit(void) { parport_release(ks0108_pardevice); parport_unregister_device(ks0108_pardevice); } module_init(ks0108_init); module_exit(ks0108_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>"); MODULE_DESCRIPTION("ks0108 LCD Controller driver");
gpl-2.0
Snuzzo/dlx_kernel
arch/cris/arch-v32/lib/memset.c
27906
7459
/* A memset for CRIS. Copyright (C) 1999-2005 Axis Communications. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of Axis Communications nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* FIXME: This file should really only be used for reference, as the result is somewhat depending on gcc generating what we expect rather than what we describe. An assembly file should be used instead. */ /* Note the multiple occurrence of the expression "12*4", including the asm. It is hard to get it into the asm in a good way. Thus better to expose the problem everywhere: no macro. */ /* Assuming one cycle per dword written or read (ok, not really true; the world is not ideal), and one cycle per instruction, then 43+3*(n/48-1) <= 24+24*(n/48-1) so n >= 45.7; n >= 0.9; we win on the first full 48-byte block to set. 
*/ #define MEMSET_BY_BLOCK_THRESHOLD (1 * 48) /* No name ambiguities in this file. */ __asm__ (".syntax no_register_prefix"); void *memset(void *pdst, int c, unsigned int plen) { /* Now we want the parameters in special registers. Make sure the compiler does something usable with this. */ register char *return_dst __asm__ ("r10") = pdst; register int n __asm__ ("r12") = plen; register int lc __asm__ ("r11") = c; /* Most apps use memset sanely. Memsetting about 3..4 bytes or less get penalized here compared to the generic implementation. */ /* This is fragile performancewise at best. Check with newer GCC releases, if they compile cascaded "x |= x << 8" to sane code. */ __asm__("movu.b %0,r13 \n\ lslq 8,r13 \n\ move.b %0,r13 \n\ move.d r13,%0 \n\ lslq 16,r13 \n\ or.d r13,%0" : "=r" (lc) /* Inputs. */ : "0" (lc) /* Outputs. */ : "r13"); /* Trash. */ { register char *dst __asm__ ("r13") = pdst; if (((unsigned long) pdst & 3) != 0 /* Oops! n = 0 must be a valid call, regardless of alignment. */ && n >= 3) { if ((unsigned long) dst & 1) { *dst = (char) lc; n--; dst++; } if ((unsigned long) dst & 2) { *(short *) dst = lc; n -= 2; dst += 2; } } /* Decide which setting method to use. */ if (n >= MEMSET_BY_BLOCK_THRESHOLD) { /* It is not optimal to tell the compiler about clobbering any registers; that will move the saving/restoring of those registers to the function prologue/epilogue, and make non-block sizes suboptimal. */ __asm__ volatile ("\ ;; GCC does promise correct register allocations, but let's \n\ ;; make sure it keeps its promises. \n\ .ifnc %0-%1-%4,$r13-$r12-$r11 \n\ .error \"GCC reg alloc bug: %0-%1-%4 != $r13-$r12-$r11\" \n\ .endif \n\ \n\ ;; Save the registers we'll clobber in the movem process \n\ ;; on the stack. Don't mention them to gcc, it will only be \n\ ;; upset. 
\n\ subq 11*4,sp \n\ movem r10,[sp] \n\ \n\ move.d r11,r0 \n\ move.d r11,r1 \n\ move.d r11,r2 \n\ move.d r11,r3 \n\ move.d r11,r4 \n\ move.d r11,r5 \n\ move.d r11,r6 \n\ move.d r11,r7 \n\ move.d r11,r8 \n\ move.d r11,r9 \n\ move.d r11,r10 \n\ \n\ ;; Now we've got this: \n\ ;; r13 - dst \n\ ;; r12 - n \n\ \n\ ;; Update n for the first loop \n\ subq 12*4,r12 \n\ 0: \n\ " #ifdef __arch_common_v10_v32 /* Cater to branch offset difference between v32 and v10. We assume the branch below has an 8-bit offset. */ " setf\n" #endif " subq 12*4,r12 \n\ bge 0b \n\ movem r11,[r13+] \n\ \n\ ;; Compensate for last loop underflowing n. \n\ addq 12*4,r12 \n\ \n\ ;; Restore registers from stack. \n\ movem [sp+],r10" /* Outputs. */ : "=r" (dst), "=r" (n) /* Inputs. */ : "0" (dst), "1" (n), "r" (lc)); } /* An ad-hoc unroll, used for 4*12-1..16 bytes. */ while (n >= 16) { *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; n -= 16; } switch (n) { case 0: break; case 1: *dst = (char) lc; break; case 2: *(short *) dst = (short) lc; break; case 3: *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 4: *(long *) dst = lc; break; case 5: *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 6: *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 7: *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 8: *(long *) dst = lc; dst += 4; *(long *) dst = lc; break; case 9: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 10: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 11: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; case 12: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; break; case 13: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst 
+= 4; *(long *) dst = lc; dst += 4; *dst = (char) lc; break; case 14: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; break; case 15: *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(long *) dst = lc; dst += 4; *(short *) dst = (short) lc; dst += 2; *dst = (char) lc; break; } } return return_dst; }
gpl-2.0
EpicCM/SPH-D700-Kernel
external/webkit/WebCore/platform/graphics/gtk/WebKitWebSourceGStreamer.cpp
3
23801
/* * Copyright (C) 2009 Sebastian Dröge <sebastian.droege@collabora.co.uk> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "WebKitWebSourceGStreamer.h" #include "CString.h" #include "Document.h" #include "GOwnPtr.h" #include "GRefPtr.h" #include "Noncopyable.h" #include "NotImplemented.h" #include "ResourceHandleClient.h" #include "ResourceHandleInternal.h" #include "ResourceRequest.h" #include "ResourceResponse.h" #include <gst/app/gstappsrc.h> #include <gst/pbutils/missing-plugins.h> using namespace WebCore; class StreamingClient : public Noncopyable, public ResourceHandleClient { public: StreamingClient(WebKitWebSrc*); virtual ~StreamingClient(); virtual void willSendRequest(ResourceHandle*, ResourceRequest&, const ResourceResponse&); virtual void didReceiveResponse(ResourceHandle*, const ResourceResponse&); virtual void didReceiveData(ResourceHandle*, const char*, int, int); virtual void didFinishLoading(ResourceHandle*); virtual void didFail(ResourceHandle*, const ResourceError&); virtual void wasBlocked(ResourceHandle*); virtual void cannotShowURL(ResourceHandle*); private: WebKitWebSrc* m_src; }; #define WEBKIT_WEB_SRC_GET_PRIVATE(obj) (G_TYPE_INSTANCE_GET_PRIVATE((obj), WEBKIT_TYPE_WEB_SRC, WebKitWebSrcPrivate)) struct _WebKitWebSrcPrivate { GstAppSrc* appsrc; 
GstPad* srcpad; gchar* uri; RefPtr<WebCore::Frame> frame; StreamingClient* client; RefPtr<ResourceHandle> resourceHandle; guint64 offset; guint64 size; gboolean seekable; gboolean paused; guint64 requestedOffset; guint needDataID; guint enoughDataID; guint seekID; // icecast stuff gboolean iradioMode; gchar* iradioName; gchar* iradioGenre; gchar* iradioUrl; gchar* iradioTitle; }; enum { PROP_IRADIO_MODE = 1, PROP_IRADIO_NAME, PROP_IRADIO_GENRE, PROP_IRADIO_URL, PROP_IRADIO_TITLE }; static GstStaticPadTemplate srcTemplate = GST_STATIC_PAD_TEMPLATE("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS_ANY); GST_DEBUG_CATEGORY_STATIC(webkit_web_src_debug); #define GST_CAT_DEFAULT webkit_web_src_debug static void webKitWebSrcUriHandlerInit(gpointer gIface, gpointer ifaceData); static void webKitWebSrcFinalize(GObject* object); static void webKitWebSrcSetProperty(GObject* object, guint propID, const GValue* value, GParamSpec* pspec); static void webKitWebSrcGetProperty(GObject* object, guint propID, GValue* value, GParamSpec* pspec); static GstStateChangeReturn webKitWebSrcChangeState(GstElement* element, GstStateChange transition); static void webKitWebSrcNeedDataCb(GstAppSrc* appsrc, guint length, gpointer userData); static void webKitWebSrcEnoughDataCb(GstAppSrc* appsrc, gpointer userData); static gboolean webKitWebSrcSeekDataCb(GstAppSrc* appsrc, guint64 offset, gpointer userData); static void webKitWebSrcStop(WebKitWebSrc* src, bool resetRequestedOffset); static GstAppSrcCallbacks appsrcCallbacks = { webKitWebSrcNeedDataCb, webKitWebSrcEnoughDataCb, webKitWebSrcSeekDataCb, { 0 } }; static void doInit(GType gtype) { static const GInterfaceInfo uriHandlerInfo = { webKitWebSrcUriHandlerInit, 0, 0 }; GST_DEBUG_CATEGORY_INIT(webkit_web_src_debug, "webkitwebsrc", 0, "websrc element"); g_type_add_interface_static(gtype, GST_TYPE_URI_HANDLER, &uriHandlerInfo); } GST_BOILERPLATE_FULL(WebKitWebSrc, webkit_web_src, GstBin, GST_TYPE_BIN, doInit); static void 
webkit_web_src_base_init(gpointer klass) { GstElementClass* eklass = GST_ELEMENT_CLASS(klass); gst_element_class_add_pad_template(eklass, gst_static_pad_template_get(&srcTemplate)); gst_element_class_set_details_simple(eklass, (gchar*) "WebKit Web source element", (gchar*) "Source", (gchar*) "Handles HTTP/HTTPS uris", (gchar*) "Sebastian Dröge <sebastian.droege@collabora.co.uk>"); } static void webkit_web_src_class_init(WebKitWebSrcClass* klass) { GObjectClass* oklass = G_OBJECT_CLASS(klass); GstElementClass* eklass = GST_ELEMENT_CLASS(klass); oklass->finalize = webKitWebSrcFinalize; oklass->set_property = webKitWebSrcSetProperty; oklass->get_property = webKitWebSrcGetProperty; // icecast stuff g_object_class_install_property(oklass, PROP_IRADIO_MODE, g_param_spec_boolean("iradio-mode", "iradio-mode", "Enable internet radio mode (extraction of shoutcast/icecast metadata)", FALSE, (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS))); g_object_class_install_property(oklass, PROP_IRADIO_NAME, g_param_spec_string("iradio-name", "iradio-name", "Name of the stream", 0, (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS))); g_object_class_install_property(oklass, PROP_IRADIO_GENRE, g_param_spec_string("iradio-genre", "iradio-genre", "Genre of the stream", 0, (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS))); g_object_class_install_property(oklass, PROP_IRADIO_URL, g_param_spec_string("iradio-url", "iradio-url", "Homepage URL for radio stream", 0, (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS))); g_object_class_install_property(oklass, PROP_IRADIO_TITLE, g_param_spec_string("iradio-title", "iradio-title", "Name of currently playing song", 0, (GParamFlags) (G_PARAM_READABLE | G_PARAM_STATIC_STRINGS))); eklass->change_state = webKitWebSrcChangeState; g_type_class_add_private(klass, sizeof(WebKitWebSrcPrivate)); } static void webkit_web_src_init(WebKitWebSrc* src, WebKitWebSrcClass* gKlass) { GstPadTemplate* padTemplate = 
gst_static_pad_template_get(&srcTemplate); GstPad* targetpad; WebKitWebSrcPrivate* priv = WEBKIT_WEB_SRC_GET_PRIVATE(src); src->priv = priv; priv->client = new StreamingClient(src); priv->srcpad = gst_ghost_pad_new_no_target_from_template("src", padTemplate); gst_element_add_pad(GST_ELEMENT(src), priv->srcpad); priv->appsrc = GST_APP_SRC(gst_element_factory_make("appsrc", 0)); if (!priv->appsrc) { GST_ERROR_OBJECT(src, "Failed to create appsrc"); return; } gst_bin_add(GST_BIN(src), GST_ELEMENT(priv->appsrc)); targetpad = gst_element_get_static_pad(GST_ELEMENT(priv->appsrc), "src"); gst_ghost_pad_set_target(GST_GHOST_PAD(priv->srcpad), targetpad); gst_object_unref(targetpad); gst_app_src_set_callbacks(priv->appsrc, &appsrcCallbacks, src, 0); gst_app_src_set_emit_signals(priv->appsrc, FALSE); gst_app_src_set_stream_type(priv->appsrc, GST_APP_STREAM_TYPE_SEEKABLE); // 512k is a abitrary number but we should choose a value // here to not pause/unpause the SoupMessage too often and // to make sure there's always some data available for // GStreamer to handle. 
gst_app_src_set_max_bytes(priv->appsrc, 512 * 1024); webKitWebSrcStop(src, true); } static void webKitWebSrcFinalize(GObject* object) { WebKitWebSrc* src = WEBKIT_WEB_SRC(object); WebKitWebSrcPrivate* priv = src->priv; delete priv->client; g_free(priv->uri); GST_CALL_PARENT(G_OBJECT_CLASS, finalize, ((GObject* )(src))); } static void webKitWebSrcSetProperty(GObject* object, guint propID, const GValue* value, GParamSpec* pspec) { WebKitWebSrc* src = WEBKIT_WEB_SRC(object); WebKitWebSrcPrivate* priv = src->priv; switch (propID) { case PROP_IRADIO_MODE: priv->iradioMode = g_value_get_boolean(value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, propID, pspec); break; } } static void webKitWebSrcGetProperty(GObject* object, guint propID, GValue* value, GParamSpec* pspec) { WebKitWebSrc* src = WEBKIT_WEB_SRC(object); WebKitWebSrcPrivate* priv = src->priv; switch (propID) { case PROP_IRADIO_MODE: g_value_set_boolean(value, priv->iradioMode); break; case PROP_IRADIO_NAME: g_value_set_string(value, priv->iradioName); break; case PROP_IRADIO_GENRE: g_value_set_string(value, priv->iradioGenre); break; case PROP_IRADIO_URL: g_value_set_string(value, priv->iradioUrl); break; case PROP_IRADIO_TITLE: g_value_set_string(value, priv->iradioTitle); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, propID, pspec); break; } } static void webKitWebSrcStop(WebKitWebSrc* src, bool resetRequestedOffset) { WebKitWebSrcPrivate* priv = src->priv; if (priv->resourceHandle) { priv->resourceHandle->cancel(); priv->resourceHandle.release(); } priv->resourceHandle = 0; if (priv->frame) priv->frame.release(); if (priv->needDataID) g_source_remove(priv->needDataID); priv->needDataID = 0; if (priv->enoughDataID) g_source_remove(priv->enoughDataID); priv->enoughDataID = 0; if (priv->seekID) g_source_remove(priv->seekID); priv->seekID = 0; priv->paused = FALSE; g_free(priv->iradioName); priv->iradioName = 0; g_free(priv->iradioGenre); priv->iradioGenre = 0; 
g_free(priv->iradioUrl); priv->iradioUrl = 0; g_free(priv->iradioTitle); priv->iradioTitle = 0; if (priv->appsrc) gst_app_src_set_caps(priv->appsrc, 0); priv->offset = 0; priv->size = 0; priv->seekable = FALSE; if (resetRequestedOffset) priv->requestedOffset = 0; GST_DEBUG_OBJECT(src, "Stopped request"); } static bool webKitWebSrcStart(WebKitWebSrc* src) { WebKitWebSrcPrivate* priv = src->priv; if (!priv->uri) { GST_ERROR_OBJECT(src, "No URI provided"); return false; } KURL url = KURL(KURL(), priv->uri); ResourceRequest request(url); request.setTargetType(ResourceRequestBase::TargetIsMedia); request.setAllowCookies(true); // Let Apple web servers know we want to access their nice movie trailers. if (!g_ascii_strcasecmp("movies.apple.com", url.host().utf8().data())) request.setHTTPUserAgent("Quicktime/7.2.0"); if (priv->frame) { Document* document = priv->frame->document(); if (document) request.setHTTPReferrer(document->documentURI()); FrameLoader* loader = priv->frame->loader(); if (loader) loader->addExtraFieldsToSubresourceRequest(request); } if (priv->requestedOffset) { GOwnPtr<gchar> val; val.set(g_strdup_printf("bytes=%" G_GUINT64_FORMAT "-", priv->requestedOffset)); request.setHTTPHeaderField("Range", val.get()); } if (priv->iradioMode) request.setHTTPHeaderField("icy-metadata", "1"); // Needed to use DLNA streaming servers request.setHTTPHeaderField("transferMode.dlna", "Streaming"); priv->resourceHandle = ResourceHandle::create(request, priv->client, 0, false, false, false); if (!priv->resourceHandle) { GST_ERROR_OBJECT(src, "Failed to create ResourceHandle"); return false; } GST_DEBUG_OBJECT(src, "Started request"); return true; } static GstStateChangeReturn webKitWebSrcChangeState(GstElement* element, GstStateChange transition) { GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; WebKitWebSrc* src = WEBKIT_WEB_SRC(element); WebKitWebSrcPrivate* priv = src->priv; switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY: if (!priv->appsrc) { 
gst_element_post_message(element, gst_missing_element_message_new(element, "appsrc")); GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no appsrc")); return GST_STATE_CHANGE_FAILURE; } break; default: break; } ret = GST_ELEMENT_CLASS(parent_class)->change_state(element, transition); if (G_UNLIKELY(ret == GST_STATE_CHANGE_FAILURE)) { GST_DEBUG_OBJECT(src, "State change failed"); return ret; } switch (transition) { case GST_STATE_CHANGE_READY_TO_PAUSED: GST_DEBUG_OBJECT(src, "READY->PAUSED"); if (!webKitWebSrcStart(src)) ret = GST_STATE_CHANGE_FAILURE; break; case GST_STATE_CHANGE_PAUSED_TO_READY: GST_DEBUG_OBJECT(src, "PAUSED->READY"); webKitWebSrcStop(src, true); break; default: break; } return ret; } // uri handler interface static GstURIType webKitWebSrcUriGetType(void) { return GST_URI_SRC; } static gchar** webKitWebSrcGetProtocols(void) { static gchar* protocols[] = {(gchar*) "http", (gchar*) "https", 0 }; return protocols; } static const gchar* webKitWebSrcGetUri(GstURIHandler* handler) { WebKitWebSrc* src = WEBKIT_WEB_SRC(handler); WebKitWebSrcPrivate* priv = src->priv; return priv->uri; } static gboolean webKitWebSrcSetUri(GstURIHandler* handler, const gchar* uri) { WebKitWebSrc* src = WEBKIT_WEB_SRC(handler); WebKitWebSrcPrivate* priv = src->priv; if (GST_STATE(src) >= GST_STATE_PAUSED) { GST_ERROR_OBJECT(src, "URI can only be set in states < PAUSED"); return FALSE; } g_free(priv->uri); priv->uri = 0; if (!uri) return TRUE; SoupURI* soupUri = soup_uri_new(uri); if (!soupUri || !SOUP_URI_VALID_FOR_HTTP(soupUri)) { GST_ERROR_OBJECT(src, "Invalid URI '%s'", uri); soup_uri_free(soupUri); return FALSE; } priv->uri = soup_uri_to_string(soupUri, FALSE); soup_uri_free(soupUri); return TRUE; } static void webKitWebSrcUriHandlerInit(gpointer gIface, gpointer ifaceData) { GstURIHandlerInterface* iface = (GstURIHandlerInterface *) gIface; iface->get_type = webKitWebSrcUriGetType; iface->get_protocols = webKitWebSrcGetProtocols; iface->get_uri = webKitWebSrcGetUri; 
iface->set_uri = webKitWebSrcSetUri; } // appsrc callbacks static gboolean webKitWebSrcNeedDataMainCb(WebKitWebSrc* src) { WebKitWebSrcPrivate* priv = src->priv; ResourceHandleInternal* d = priv->resourceHandle->getInternal(); if (d->m_msg) soup_session_unpause_message(ResourceHandle::defaultSession(), d->m_msg); priv->paused = FALSE; priv->needDataID = 0; return FALSE; } static void webKitWebSrcNeedDataCb(GstAppSrc* appsrc, guint length, gpointer userData) { WebKitWebSrc* src = WEBKIT_WEB_SRC(userData); WebKitWebSrcPrivate* priv = src->priv; GST_DEBUG_OBJECT(src, "Need more data: %u", length); if (priv->needDataID || !priv->paused) return; priv->needDataID = g_timeout_add_full(G_PRIORITY_HIGH, 0, (GSourceFunc) webKitWebSrcNeedDataMainCb, gst_object_ref(src), (GDestroyNotify) gst_object_unref); } static gboolean webKitWebSrcEnoughDataMainCb(WebKitWebSrc* src) { WebKitWebSrcPrivate* priv = src->priv; ResourceHandleInternal* d = priv->resourceHandle->getInternal(); soup_session_pause_message(ResourceHandle::defaultSession(), d->m_msg); priv->paused = TRUE; priv->enoughDataID = 0; return FALSE; } static void webKitWebSrcEnoughDataCb(GstAppSrc* appsrc, gpointer userData) { WebKitWebSrc* src = WEBKIT_WEB_SRC(userData); WebKitWebSrcPrivate* priv = src->priv; GST_DEBUG_OBJECT(src, "Have enough data"); if (priv->enoughDataID || priv->paused) return; priv->enoughDataID = g_timeout_add_full(G_PRIORITY_HIGH, 0, (GSourceFunc) webKitWebSrcEnoughDataMainCb, gst_object_ref(src), (GDestroyNotify) gst_object_unref); } static gboolean webKitWebSrcSeekMainCb(WebKitWebSrc* src) { webKitWebSrcStop(src, false); webKitWebSrcStart(src); return FALSE; } static gboolean webKitWebSrcSeekDataCb(GstAppSrc* appsrc, guint64 offset, gpointer userData) { WebKitWebSrc* src = WEBKIT_WEB_SRC(userData); WebKitWebSrcPrivate* priv = src->priv; GST_DEBUG_OBJECT(src, "Seeking to offset: %" G_GUINT64_FORMAT, offset); if (offset == priv->offset) return TRUE; if (!priv->seekable) return FALSE; if (offset > 
priv->size) return FALSE; GST_DEBUG_OBJECT(src, "Doing range-request seek"); priv->requestedOffset = offset; if (priv->seekID) g_source_remove(priv->seekID); priv->seekID = g_timeout_add_full(G_PRIORITY_HIGH, 0, (GSourceFunc) webKitWebSrcSeekMainCb, gst_object_ref(src), (GDestroyNotify) gst_object_unref); return TRUE; } void webKitWebSrcSetFrame(WebKitWebSrc* src, WebCore::Frame* frame) { WebKitWebSrcPrivate* priv = src->priv; priv->frame = frame; } StreamingClient::StreamingClient(WebKitWebSrc* src) : m_src(src) { } StreamingClient::~StreamingClient() { } void StreamingClient::willSendRequest(ResourceHandle*, ResourceRequest&, const ResourceResponse&) { } void StreamingClient::didReceiveResponse(ResourceHandle*, const ResourceResponse& response) { WebKitWebSrcPrivate* priv = m_src->priv; GST_DEBUG_OBJECT(m_src, "Received response: %d", response.httpStatusCode()); // If we seeked we need 206 == PARTIAL_CONTENT if (priv->requestedOffset && response.httpStatusCode() != 206) { GST_ELEMENT_ERROR(m_src, RESOURCE, READ, (0), (0)); webKitWebSrcStop(m_src, true); return; } long long length = response.expectedContentLength(); if (length > 0) { length += priv->requestedOffset; gst_app_src_set_size(priv->appsrc, length); } priv->size = length >= 0 ? 
length : 0; priv->seekable = length > 0 && g_ascii_strcasecmp("none", response.httpHeaderField("Accept-Ranges").utf8().data()); // icecast stuff String value = response.httpHeaderField("icy-metaint"); if (!value.isEmpty()) { gchar* endptr = 0; gint64 icyMetaInt = g_ascii_strtoll(value.utf8().data(), &endptr, 10); if (endptr && *endptr == '\0' && icyMetaInt > 0) { GstCaps* caps = gst_caps_new_simple("application/x-icy", "metadata-interval", G_TYPE_INT, (gint) icyMetaInt, NULL); gst_app_src_set_caps(priv->appsrc, caps); gst_caps_unref(caps); } } GstTagList* tags = gst_tag_list_new(); value = response.httpHeaderField("icy-name"); if (!value.isEmpty()) { g_free(priv->iradioName); priv->iradioName = g_strdup(value.utf8().data()); g_object_notify(G_OBJECT(m_src), "iradio-name"); gst_tag_list_add(tags, GST_TAG_MERGE_REPLACE, GST_TAG_ORGANIZATION, priv->iradioName, NULL); } value = response.httpHeaderField("icy-genre"); if (!value.isEmpty()) { g_free(priv->iradioGenre); priv->iradioGenre = g_strdup(value.utf8().data()); g_object_notify(G_OBJECT(m_src), "iradio-genre"); gst_tag_list_add(tags, GST_TAG_MERGE_REPLACE, GST_TAG_GENRE, priv->iradioGenre, NULL); } value = response.httpHeaderField("icy-url"); if (!value.isEmpty()) { g_free(priv->iradioUrl); priv->iradioUrl = g_strdup(value.utf8().data()); g_object_notify(G_OBJECT(m_src), "iradio-url"); gst_tag_list_add(tags, GST_TAG_MERGE_REPLACE, GST_TAG_LOCATION, priv->iradioUrl, NULL); } value = response.httpHeaderField("icy-title"); if (!value.isEmpty()) { g_free(priv->iradioTitle); priv->iradioTitle = g_strdup(value.utf8().data()); g_object_notify(G_OBJECT(m_src), "iradio-title"); gst_tag_list_add(tags, GST_TAG_MERGE_REPLACE, GST_TAG_TITLE, priv->iradioTitle, NULL); } if (gst_tag_list_is_empty(tags)) gst_tag_list_free(tags); else gst_element_found_tags_for_pad(GST_ELEMENT(m_src), m_src->priv->srcpad, tags); } void StreamingClient::didReceiveData(ResourceHandle* handle, const char* data, int length, int lengthReceived) { 
WebKitWebSrcPrivate* priv = m_src->priv; GST_LOG_OBJECT(m_src, "Have %d bytes of data", length); if (priv->seekID || handle != priv->resourceHandle) { GST_DEBUG_OBJECT(m_src, "Seek in progress, ignoring data"); return; } GstBuffer* buffer = gst_buffer_new_and_alloc(length); memcpy(GST_BUFFER_DATA(buffer), data, length); GST_BUFFER_OFFSET(buffer) = priv->offset; priv->offset += length; GST_BUFFER_OFFSET_END(buffer) = priv->offset; GstFlowReturn ret = gst_app_src_push_buffer(priv->appsrc, buffer); if (ret != GST_FLOW_OK && ret != GST_FLOW_UNEXPECTED) GST_ELEMENT_ERROR(m_src, CORE, FAILED, (0), (0)); } void StreamingClient::didFinishLoading(ResourceHandle*) { GST_DEBUG_OBJECT(m_src, "Have EOS"); gst_app_src_end_of_stream(m_src->priv->appsrc); } void StreamingClient::didFail(ResourceHandle*, const ResourceError& error) { GST_ERROR_OBJECT(m_src, "Have failure: %s", error.localizedDescription().utf8().data()); GST_ELEMENT_ERROR(m_src, RESOURCE, FAILED, ("%s", error.localizedDescription().utf8().data()), (0)); gst_app_src_end_of_stream(m_src->priv->appsrc); } void StreamingClient::wasBlocked(ResourceHandle*) { } void StreamingClient::cannotShowURL(ResourceHandle*) { }
gpl-2.0
edoko/Air_Kernel-Mako
drivers/staging/prima/CORE/SME/src/ccm/ccmApi.c
3
33988
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ #include "palTypes.h" #include "wniApi.h" /* WNI_CFG_SET_REQ */ #include "sirParams.h" /* tSirMbMsg */ #include "smsDebug.h" /* smsLog */ #include "cfgApi.h" #include "ccmApi.h" #include "logDump.h" //#define CCM_DEBUG #undef CCM_DEBUG #define CCM_DEBUG2 //#undef CCM_DEBUG2 #define CFGOBJ_ALIGNTO 4 #define CFGOBJ_ALIGN(len) ( ((len)+CFGOBJ_ALIGNTO-1) & ~(CFGOBJ_ALIGNTO-1) ) #define CFGOBJ_ID_SIZE 4 /* 4 bytes for cfgId */ #define CFGOBJ_LEN_SIZE 4 /* 4 bytes for length */ #define CFGOBJ_INTEGER_VALUE_SIZE 4 /* 4 bytes for integer value */ #define CFG_UPDATE_MAGIC_DWORD 0xabab #define halHandle2HddHandle(hHal) ( (NULL == (hHal)) ? 0 : ((tpAniSirGlobal)(hHal))->hHdd ) static void ccmComplete(tHddHandle hHdd, void *done) { if (done) { (void)palSemaphoreGive(hHdd, done); } } static void ccmWaitForCompletion(tHddHandle hHdd, void *done) { if (done) { (void)palSemaphoreTake(hHdd, done); } } static tANI_U32 * encodeCfgReq(tHddHandle hHdd, tANI_U32 *pl, tANI_U32 cfgId, tANI_S32 length, void *pBuf, tANI_U32 value, tANI_U32 type) { *pl++ = pal_cpu_to_be32(cfgId) ; *pl++ = pal_cpu_to_be32(length) ; if (type == CCM_INTEGER_TYPE) { *pl++ = pal_cpu_to_be32(value) ; } else { palCopyMemory(hHdd, (void *)pl, (void *)pBuf, length); pl += (CFGOBJ_ALIGN(length) / CFGOBJ_ALIGNTO); } return pl ; } /* * CCM_STRING_TYPE CCM_INTEGER_TYPE * |<-------- 4 ----->| |<-------- 4 ----->| * +----------+ <-- msg --> +----------+ * |type | |type | * +----------+ +----------+ * |msgLen=24 | |msgLen=16 | * +----------+----------+ +----------+----------+ * | cfgId | | cfgId | * +---------------------+ +---------------------+ * | length=11 | | length=4 | * +---------------------+ +---------------------+ * | | | value | * | | +---------------------+ * | | * | +----+ * | |////| <- padding to 4-byte boundary * +----------------+----+ */ static eHalStatus sendCfg(tpAniSirGlobal pMac, tHddHandle hHdd, tCfgReq *req, tANI_BOOLEAN fRsp) { tSirMbMsg *msg; eHalStatus status; tANI_S16 msgLen = (tANI_U16)(4 + /* 
4 bytes for msg header */ CFGOBJ_ID_SIZE + CFGOBJ_LEN_SIZE + CFGOBJ_ALIGN(req->length)) ; status = palAllocateMemory(hHdd, (void **)&msg, msgLen); if (status == eHAL_STATUS_SUCCESS) { if( fRsp ) { msg->type = pal_cpu_to_be16(WNI_CFG_SET_REQ); } else { msg->type = pal_cpu_to_be16(WNI_CFG_SET_REQ_NO_RSP); } msg->msgLen = pal_cpu_to_be16(msgLen); (void)encodeCfgReq(hHdd, msg->data, req->cfgId, req->length, req->ccmPtr, req->ccmValue, req->type) ; status = palSendMBMessage(hHdd, msg) ; if (status != eHAL_STATUS_SUCCESS) { smsLog( pMac, LOGW, FL("palSendMBMessage() failed\n")); //No need to free msg. palSendMBMessage frees it. status = eHAL_STATUS_FAILURE ; } } else { smsLog( pMac, LOGW, FL("palAllocateMemory(len=%d)\n"), msgLen ); } return status ; } static tCfgReq * allocateCfgReq(tHddHandle hHdd, tANI_U32 type, tANI_S32 length) { tCfgReq *req ; tANI_S32 alloc_len = sizeof(tCfgReq) ; if (type == CCM_STRING_TYPE) { alloc_len += length ; } if (palAllocateMemory(hHdd, (void **)&req, alloc_len) != eHAL_STATUS_SUCCESS) { return NULL ; } req->ccmPtr = (req+1); return req ; } static void freeCfgReq(tHddHandle hHdd, tCfgReq *req) { palFreeMemory(hHdd, (void*)req) ; } static void add_req_tail(tCfgReq *req, struct ccmlink *q) { if (q->tail) { q->tail->next = req; q->tail = req ; } else { q->head = q->tail = req ; } } static void del_req(tCfgReq *req, struct ccmlink *q) { q->head = req->next ; req->next = NULL ; if (q->head == NULL) { q->tail = NULL ; } } static void purgeReqQ(tHalHandle hHal) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); tCfgReq *req, *tmp ; for (req = pMac->ccm.reqQ.head; req; req = tmp) { /* loop thru reqQ and invoke callback to return failure */ smsLog(pMac, LOGW, FL("deleting cfgReq, cfgid=%d\n"), (int)req->cfgId); tmp = req->next ; if (req->callback) { req->callback(hHal, eHAL_STATUS_FAILURE); } palSpinLockTake(hHdd, pMac->ccm.lock); del_req(req, &pMac->ccm.reqQ); palSpinLockGive(hHdd, pMac->ccm.lock); 
freeCfgReq(hHdd, req); } return ; } static void sendQueuedReqToMacSw(tpAniSirGlobal pMac, tHddHandle hHdd) { tCfgReq *req ; /* Send the head req */ req = pMac->ccm.reqQ.head ; if (req) { if (req->state == eCCM_REQ_QUEUED) { /* Send WNI_CFG_SET_REQ */ req->state = eCCM_REQ_SENT; if (sendCfg(pMac, hHdd, req, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS) { smsLog( pMac, LOGW, FL("sendCfg() failed\n")); palSpinLockTake(hHdd, pMac->ccm.lock); del_req(req, &pMac->ccm.reqQ) ; palSpinLockGive(hHdd, pMac->ccm.lock); if (req->callback) { req->callback((tHalHandle)pMac, WNI_CFG_OTHER_ERROR) ; } #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("ccmComplete(%p)\n"), req->done); #endif ccmComplete(hHdd, req->done); freeCfgReq(hHdd, req); } } else { smsLog( pMac, LOGW, FL("reqState is not eCCM_REQ_QUEUED, is %d\n"), req->state ); } } return ; } static eHalStatus cfgSetSub(tpAniSirGlobal pMac, tHddHandle hHdd, tANI_U32 cfgId, tANI_U32 type, tANI_S32 length, void *ccmPtr, tANI_U32 ccmValue, tCcmCfgSetCallback callback, eAniBoolean toBeSaved, void *sem, tCfgReq **r) { eHalStatus status = eHAL_STATUS_SUCCESS; tCfgReq *req ; do { *r = NULL ; if (pMac->ccm.state == eCCM_STOPPED) { status = eHAL_STATUS_FAILURE ; break ; } req = allocateCfgReq(hHdd, type, length); if (req == NULL) { status = eHAL_STATUS_FAILED_ALLOC ; break ; } req->next = NULL ; req->cfgId = (tANI_U16)cfgId ; req->type = (tANI_U8)type ; req->state = eCCM_REQ_QUEUED ; req->toBeSaved = !!toBeSaved ; req->length = length ; req->done = sem ; req->callback = callback ; if (type == CCM_INTEGER_TYPE) { req->ccmValue = ccmValue ; } else { palCopyMemory(hHdd, (void*)req->ccmPtr, (void*)ccmPtr, length); } palSpinLockTake(hHdd, pMac->ccm.lock); add_req_tail(req, &pMac->ccm.reqQ); /* If this is the first req on the queue, send it to MAC SW */ if ((pMac->ccm.replay.started == 0) && (pMac->ccm.reqQ.head == req)) { /* Send WNI_CFG_SET_REQ */ req->state = eCCM_REQ_SENT; palSpinLockGive(hHdd, pMac->ccm.lock); status = sendCfg(pMac, hHdd, req, 
eANI_BOOLEAN_TRUE) ; if (status != eHAL_STATUS_SUCCESS) { smsLog( pMac, LOGW, FL("sendCfg() failed\n")); palSpinLockTake(hHdd, pMac->ccm.lock); del_req(req, &pMac->ccm.reqQ); palSpinLockGive(hHdd, pMac->ccm.lock); freeCfgReq(hHdd, req); break ; } else { palSpinLockTake(hHdd, pMac->ccm.lock); if(req != pMac->ccm.reqQ.head) { //We send the request and it must be done already req = NULL; } palSpinLockGive(hHdd, pMac->ccm.lock); } } else { palSpinLockGive(hHdd, pMac->ccm.lock); } *r = req ; } while(0) ; return status; } static eHalStatus cfgSet(tHalHandle hHal, tANI_U32 cfgId, tANI_U32 type, tANI_S32 length, void * ccmPtr, tANI_U32 ccmValue, tCcmCfgSetCallback callback, eAniBoolean toBeSaved) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); eHalStatus status; tCfgReq *req ; if (pal_in_interrupt()) { #ifdef CCM_DEBUG2 smsLog(pMac, LOG1, FL("WNI_CFG_%s (%d 0x%x), in_interrupt()=TRUE"), gCfgParamName[cfgId], (int)cfgId, (int)cfgId); #endif status = cfgSetSub(pMac, hHdd, cfgId, type, length, ccmPtr, ccmValue, callback, toBeSaved, NULL, &req); } else { void *sem ; #ifdef CCM_DEBUG2 smsLog(pMac, LOG1, FL("WNI_CFG_%s (%d 0x%x), in_interrupt()=FALSE"), gCfgParamName[cfgId], (int)cfgId, (int)cfgId); #endif pal_local_bh_disable() ; status = palMutexAllocLocked( hHdd, &sem ) ; if (status != eHAL_STATUS_SUCCESS) { smsLog(pMac, LOGE, FL("mutex alloc failed\n")); sem = NULL; } else { status = cfgSetSub(pMac, hHdd, cfgId, type, length, ccmPtr, ccmValue, callback, toBeSaved, sem, &req); if ((status != eHAL_STATUS_SUCCESS) || (req == NULL)) { //Either it fails to send or the req is finished already palSemaphoreFree( hHdd, sem ); sem = NULL; } } pal_local_bh_enable() ; if ((status == eHAL_STATUS_SUCCESS) && (sem != NULL)) { #ifdef CCM_DEBUG smsLog(pMac, LOG1, FL("ccmWaitForCompletion(%p)"), req->done); #endif ccmWaitForCompletion(hHdd, sem); #ifdef CCM_DEBUG smsLog(pMac, LOG1, FL("free(%p)"), req->done); #endif palSemaphoreFree( hHdd, sem ) ; } } 
return status ; } eHalStatus ccmOpen(tHalHandle hHal) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); (void)palZeroMemory(hHdd, &pMac->ccm, sizeof(tCcm)) ; return palSpinLockAlloc(hHdd, &pMac->ccm.lock); } eHalStatus ccmClose(tHalHandle hHal) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); tANI_U32 i ; tCfgReq *req ; ccmStop(hHal); /* Go thru comp[] to free all saved requests */ for (i = 0 ; i < CFG_PARAM_MAX_NUM ; ++i) { if ((req = pMac->ccm.comp[i]) != NULL) { freeCfgReq(hHdd, req); } } return palSpinLockFree(hHdd, pMac->ccm.lock); } /* This function executes in (Linux) softirq context */ void ccmCfgCnfMsgHandler(tHalHandle hHal, void *m) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); tSirMbMsg *msg = (tSirMbMsg *)m ; tANI_U32 result, cfgId ; tCfgReq *req, *old ; #if 0 if (pMac->ccm.state != eCCM_STARTED) { return ; } #endif result = pal_be32_to_cpu(msg->data[0]); cfgId = pal_be32_to_cpu(msg->data[1]); if (pMac->ccm.replay.started && cfgId == CFG_UPDATE_MAGIC_DWORD) { pMac->ccm.replay.in_progress = 1 ; return ; } if (pMac->ccm.replay.in_progress) { /* save error code */ if (!CCM_IS_RESULT_SUCCESS(result)) { pMac->ccm.replay.result = result ; } if (--pMac->ccm.replay.nr_param == 0) { pMac->ccm.replay.in_progress = 0 ; if (pMac->ccm.replay.callback) { pMac->ccm.replay.callback(hHal, pMac->ccm.replay.result); } pMac->ccm.replay.started = 0 ; /* Wake up the sleeping process */ #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("ccmComplete(%p)\n"), pMac->ccm.replay.done); #endif ccmComplete(hHdd, pMac->ccm.replay.done); //Let go with the rest of the set CFGs waiting. sendQueuedReqToMacSw(pMac, hHdd); } } else { /* * Try to match this response with the request. * What if i could not find the req entry ??? 
*/ req = pMac->ccm.reqQ.head ; if (req) { if (req->cfgId == cfgId && req->state == eCCM_REQ_SENT) { palSpinLockTake(hHdd, pMac->ccm.lock); del_req(req, &pMac->ccm.reqQ); palSpinLockGive(hHdd, pMac->ccm.lock); req->state = eCCM_REQ_DONE ; if (result == WNI_CFG_NEED_RESTART || result == WNI_CFG_NEED_RELOAD) { #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("need restart/reload, cfgId=%d\n"), req->cfgId) ; #endif //purgeReqQ(hHal); } /* invoke callback */ if (req->callback) { #ifdef CCM_DEBUG req->callback(hHal, cfgId) ; #else req->callback(hHal, result) ; #endif } /* Wake up the sleeping process */ #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("cfgId=%ld, calling ccmComplete(%p)\n"), cfgId, req->done); #endif ccmComplete(hHdd, req->done); /* move the completed req from reqQ to comp[] */ if (req->toBeSaved && (CCM_IS_RESULT_SUCCESS(result))) { if ((old = pMac->ccm.comp[cfgId]) != NULL) { freeCfgReq(hHdd, old) ; } pMac->ccm.comp[cfgId] = req ; } else { freeCfgReq(hHdd, req) ; } sendQueuedReqToMacSw(pMac, hHdd); } else { smsLog( pMac, LOGW, FL("can not match RSP with REQ, rspcfgid=%d result=%d reqcfgid=%d reqstate=%d\n"), (int)cfgId, (int)result, req->cfgId, req->state); #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("ccmComplete(%p)\n"), req->done); #endif } } } return ; } void ccmStart(tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); pMac->ccm.state = eCCM_STARTED ; #if defined(ANI_LOGDUMP) ccmDumpInit(hHal); #endif //#if defined(ANI_LOGDUMP) return ; } void ccmStop(tHalHandle hHal) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); pMac->ccm.state = eCCM_STOPPED ; pal_local_bh_disable() ; purgeReqQ(hHal); pal_local_bh_enable() ; return ; } eHalStatus ccmCfgSetInt(tHalHandle hHal, tANI_U32 cfgId, tANI_U32 ccmValue, tCcmCfgSetCallback callback, eAniBoolean toBeSaved) { if( callback || toBeSaved) { //we need to sychronous this one return cfgSet(hHal, cfgId, CCM_INTEGER_TYPE, sizeof(tANI_U32), NULL, ccmValue, callback, toBeSaved); } else { //Simply push to CFG and not waiting for the 
response tCfgReq req; tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); req.callback = NULL; req.next = NULL; req.cfgId = ( tANI_U16 )cfgId; req.length = sizeof( tANI_U32 ); req.type = CCM_INTEGER_TYPE; req.ccmPtr = NULL; req.ccmValue = ccmValue; req.toBeSaved = toBeSaved; req.state = eCCM_REQ_SENT; return ( sendCfg( pMac, pMac->hHdd, &req, eANI_BOOLEAN_FALSE ) ); } } eHalStatus ccmCfgSetStr(tHalHandle hHal, tANI_U32 cfgId, tANI_U8 *pStr, tANI_U32 length, tCcmCfgSetCallback callback, eAniBoolean toBeSaved) { if( callback || toBeSaved ) { //we need to sychronous this one return cfgSet(hHal, cfgId, CCM_STRING_TYPE, length, pStr, 0, callback, toBeSaved); } else { //Simply push to CFG and not waiting for the response tCfgReq req; tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); req.callback = NULL; req.next = NULL; req.cfgId = ( tANI_U16 )cfgId; req.length = length; req.type = CCM_STRING_TYPE; req.ccmPtr = pStr; req.ccmValue = 0; req.toBeSaved = toBeSaved; req.state = eCCM_REQ_SENT; return ( sendCfg( pMac, pMac->hHdd, &req, eANI_BOOLEAN_FALSE ) ); } } eHalStatus ccmCfgGetInt(tHalHandle hHal, tANI_U32 cfgId, tANI_U32 *pValue) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); eHalStatus status = eHAL_STATUS_SUCCESS ; tCfgReq *req = pMac->ccm.comp[cfgId] ; if (req && req->state == eCCM_REQ_DONE) { *pValue = req->ccmValue ; } else { if (wlan_cfgGetInt(pMac, (tANI_U16)cfgId, pValue) != eSIR_SUCCESS) status = eHAL_STATUS_FAILURE; } return status ; } eHalStatus ccmCfgGetStr(tHalHandle hHal, tANI_U32 cfgId, tANI_U8 *pBuf, tANI_U32 *pLength) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); tHddHandle hHdd; eHalStatus status = eHAL_STATUS_SUCCESS ; tCfgReq *req; if (!pMac) return eHAL_STATUS_FAILURE; hHdd = halHandle2HddHandle(hHal); req = pMac->ccm.comp[cfgId] ; if (req && req->state == eCCM_REQ_DONE && (tANI_U32)req->length <= *pLength) { *pLength = req->length ; palCopyMemory(hHdd, (void*)pBuf, (void*)req->ccmPtr, req->length); } else { if (wlan_cfgGetStr(pMac, (tANI_U16)cfgId, pBuf, pLength) != 
eSIR_SUCCESS) status = eHAL_STATUS_FAILURE; } return status ; } /* * Loop thru comp[] and form an ANI message which contains all completed cfgIds. * The message begins with an INTEGER parameter (cfgId=CFG_UPDATE_MAGIC_DWORD) * to mark the start of the message. */ static eHalStatus cfgUpdate(tpAniSirGlobal pMac, tHddHandle hHdd, tCcmCfgSetCallback callback) { tANI_U32 i, *pl ; tCfgReq *req ; tSirMbMsg *msg ; eHalStatus status ; tANI_S16 msgLen = 4 + /* 4 bytes for msg header */ /* for CFG_UPDATE_MAGIC_DWORD */ CFGOBJ_ID_SIZE + CFGOBJ_LEN_SIZE + CFGOBJ_INTEGER_VALUE_SIZE ; if (pMac->ccm.state == eCCM_STOPPED || pMac->ccm.replay.started) { status = eHAL_STATUS_FAILURE ; goto end ; } palSpinLockTake(hHdd, pMac->ccm.lock); pMac->ccm.replay.started = 1 ; pMac->ccm.replay.nr_param = 0 ; palSpinLockGive(hHdd, pMac->ccm.lock); /* Calculate message length */ for (i = 0 ; i < CFG_PARAM_MAX_NUM ; ++i) { if ((req = pMac->ccm.comp[i]) != NULL) { msgLen += (tANI_S16)(CFGOBJ_ID_SIZE + CFGOBJ_LEN_SIZE + CFGOBJ_ALIGN(req->length)) ; pMac->ccm.replay.nr_param += 1 ; #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("cfgId=%d\n"), req->cfgId); #endif } } if (pMac->ccm.replay.nr_param == 0) { if (callback) { callback((tHalHandle)pMac, WNI_CFG_SUCCESS) ; } status = eHAL_STATUS_SUCCESS ; goto end ; } pMac->ccm.replay.in_progress = 0 ; pMac->ccm.replay.result = WNI_CFG_SUCCESS ; pMac->ccm.replay.callback = callback ; pMac->ccm.replay.done = NULL ; status = palAllocateMemory(hHdd, (void **)&msg, msgLen) ; if (status != eHAL_STATUS_SUCCESS) { pMac->ccm.replay.started = 0 ; goto end ; } msg->type = pal_cpu_to_be16(WNI_CFG_SET_REQ); msg->msgLen = pal_cpu_to_be16(msgLen); /* Encode the starting cfgId */ pl = encodeCfgReq(hHdd, msg->data, CFG_UPDATE_MAGIC_DWORD, 4, NULL, 0, CCM_INTEGER_TYPE) ; /* Encode the saved cfg requests */ for (i = 0 ; i < CFG_PARAM_MAX_NUM ; ++i) { if ((req = pMac->ccm.comp[i]) != NULL) { pl = encodeCfgReq(hHdd, pl, req->cfgId, req->length, req->ccmPtr, req->ccmValue, req->type) ; 
} } status = palSendMBMessage(hHdd, msg) ; if (status != eHAL_STATUS_SUCCESS) { smsLog(pMac, LOGW, FL("palSendMBMessage() failed. status=%d\n"), status); pMac->ccm.replay.started = 0 ; //No need to free msg. palSendMBMessage frees it. goto end ; } end: return status ; } eHalStatus ccmCfgUpdate(tHalHandle hHal, tCcmCfgSetCallback callback) { tHddHandle hHdd = halHandle2HddHandle(hHal); tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); eHalStatus status ; pal_local_bh_disable() ; status = cfgUpdate(pMac, hHdd, callback) ; if (status == eHAL_STATUS_SUCCESS) { if (pMac->ccm.replay.nr_param == 0) { /* there is nothing saved at comp[], so we are done! */ pMac->ccm.replay.started = 0 ; } else { /* we have sent update message to MAC SW */ void *sem ; status = palMutexAllocLocked( hHdd, &sem ) ; if (status != eHAL_STATUS_SUCCESS) { smsLog(pMac, LOGE, FL("mutex alloc failed\n")); pMac->ccm.replay.started = 0 ; } else { pMac->ccm.replay.done = sem ; } } } pal_local_bh_enable() ; /* Waiting here ... */ if (status == eHAL_STATUS_SUCCESS && pMac->ccm.replay.done) { #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("ccmWaitForCompletion(%p)\n"), pMac->ccm.replay.done); #endif ccmWaitForCompletion(hHdd, pMac->ccm.replay.done); #ifdef CCM_DEBUG smsLog(pMac, LOGW, FL("free(%p)\n"), pMac->ccm.replay.done); #endif palSemaphoreFree( hHdd, pMac->ccm.replay.done) ; } return status ; } #if 0 /////////////////////////////////////////////////////////////////// #include <linux/netdevice.h> #include <linux/delay.h> extern struct net_device * hdd_dev[]; typedef struct pal_netdev_priv_s { // pointer to the PCI device structure for this device struct pci_dev *pci_dev; // TAURUS has three BAR registers // BAR0 is a fixed window for the PIF registers volatile void *win0_addr; tANI_U32 win0_size; // BAR1 is a movable window for all other registers volatile void *win1_addr; tANI_U32 win1_size; tANI_U32 win1_current; // BAR2 is a movable window for all other memory volatile void *win2_addr; tANI_U32 win2_size; 
tANI_U32 win2_current; } pal_netdev_priv_t; typedef struct hdd_stats_s { /* Stats on the MAC SW messages sent to applications */ ulong stats_mac_rx_mbx_tot_cnt; ulong stats_mac_rx_mbx_success_cnt; ulong stats_mac_rx_mbx_drop_cnt; /* Stats on messages sent to the MAC SW messages from applications */ ulong stats_mac_tx_mbx_tot_cnt; ulong stats_mac_tx_mbx_success_cnt; ulong stats_mac_tx_mbx_drop_cnt; } hdd_stats_t; /* HDD Driver Private Data structure */ typedef struct hdd_netdev_priv_s { tANI_U32 magic_head; // chipset-specific private data pal_netdev_priv_t ANI_CHIPSET; tHalHandle halHandle; #ifdef ANI_BUS_TYPE_PCI struct pci_dev *pci_dev; struct pci_device_id * pPciDevId; #endif // ANI_BUS_TYPE_PCI // Queued EAPOL frame destination. tANI_U32 eapol_pid; unsigned int num_xmit; /* * Various frequently used variables that pertain to this * instance of the driver */ tANI_U32 rx_buf_sz; /* Based on MTU+extra headroom needed.*/ tANI_U32 td_enqueue_nested; /* * Flag set by MAC SW to indicate a TD ring is desired */ int td_flush; int selectiveFlush; spinlock_t lock; /* Stats */ struct net_device_stats stats; int curr_acc_cat; tANI_U16 lport; /* switch logical port */ /* management and control */ tANI_U32 status; tANI_U32 msg_enable; tANI_U32 radio_id; /* Unit # of this device */ int ap_flag; /* * indicates if the Radio is in AP * or BP mode. */ /* NAPI Polling suport */ struct timer_list oom_timer; /* Out of memory timer. */ struct timer_list reset_req_timer;/* * Timer started when a Reset * request is sent to WSM. Cleared * when a subsequent Radio Disable * Request is received. 
*/ struct tasklet_struct *rx_tasklet; struct tasklet_struct *tx_tasklet; tANI_U32 learn_mode_frame_cnt; /* * Data Frames forwarded to MAC SW * when Polaris is in learn mode */ tANI_U32 mgmt_ctl_frames; /* MGMT/CTL Frames forwarded to MAC SW */ tANI_U32 nir; /* total number of times the ISR has * been invoked.*/ tANI_U32 stats_dummy_pkt_requeue_cnt; tANI_U32 stats_rx_td_dummy_pkt_cnt;/* TD Dummy pkts that were after HIF loopback */ tANI_U32 stats_rx_tm_dummy_pkt_cnt;/* TM Dummy pkts that were after HIF loopback */ tANI_U32 stats_td_dummy_pkt_cnt; /* Dummy pkts that were succesfully * put on the TD ring and that * were picked up by the HIF */ tANI_U32 stats_mac_dummy_pkt_drop_cnt;/* Number of dummy pkts dropped by the HDD * due to any reason */ tANI_U32 stats_wns_l2update_cnt; tANI_U32 stats_app_hif_wr_pkt_cnt; hdd_stats_t hdd_stats; tANI_U32 stats_reset_req_timer_cnt; /* * Number of times the * Reset Req Timer expired */ #ifdef TCP_PROFILE unsigned int pv_current_ip_proto; unsigned int pv_current_ip_byte; unsigned int pv_current_ack_seq; unsigned int pv_current_seq; unsigned int pv_rtt; unsigned int pv_sent_seq; unsigned int pv_p_ts; unsigned int pv_tfpl_ts; #endif tANI_U32 stats_mac_reset_cnt; /* MAC SW Reset Requests */ tANI_U32 stats_mac_reset_eof_sof; tANI_U32 stats_mac_reset_bmu; tANI_U32 stats_mac_reset_pdu_low; tANI_U32 stats_mac_reset_user; tANI_U32 stats_mac_reset_wd_timeout; tANI_U32 stats_mac_reset_unspecified; tANI_U32 stats_wd_timeout_cnt; tANI_U32 stats_radio_enable_cnt; tANI_U32 stats_radio_disable_cnt; #ifdef PLM_EXTRA_STATS tANI_U32 stats_tx_xmit_refilled; /* Pkts xmit-filled */ tANI_U32 stats_tx_queue_stop; tANI_U32 stats_tx_queue_start; tANI_U32 stats_alloc_fail; tANI_U32 stats_poll_starts; tANI_U32 stats_poll_pkts; tANI_U32 stats_poll_exit_done; tANI_U32 stats_poll_exit_not_done; tANI_U32 stats_poll_exit_oom; tANI_U32 stats_poll_exit_done_rx_pending; tANI_U32 stats_poll_zero_rx; #ifdef CONFIG_PROC_FS #ifdef ANI_USE_TASKLET struct proc_dir_entry 
*proc_driver_dir; // for /proc/net/drivers #endif struct proc_dir_entry *proc_ent_dir; // for the directory itself struct proc_dir_entry *proc_ent_stats; struct proc_dir_entry *proc_ent_np_dump; struct proc_dir_entry *proc_ent_ring; char proc_fname_stats[32]; char proc_fname_np_dump[32]; char proc_fname_ring[32]; /* Setting Debug levels */ struct proc_dir_entry * proc_ent_dbg; char proc_fname_dbg[32]; /* For bypass flags */ struct proc_dir_entry * proc_ent_bypass; char proc_fname_bypass[32]; int sir_dump_cmd; // Dump SIR command int sir_dump_arg1; // Dump SIR argument 1 int sir_dump_arg2; // Dump SIR argument 2 int sir_dump_arg3; // Dump SIR argument 3 int sir_dump_arg4; // Dump SIR argument 4 struct proc_dir_entry * proc_ent_sir_dump; char proc_fname_sir_dump[32]; eHalStatus status ; struct proc_dir_entry * proc_ent_eeprom_info; char proc_fname_eeprom_info[32]; #endif /* ifdef CONFIG_PROC_FS */ tANI_U32 rx_small_skb_failure; unsigned long open_time; /* jiffies for last open */ #endif /* PLM_EXTRA_STATS */ int mac_down; tANI_U32 rx_mac_msg_cnt; tANI_U32 tx_mac_msg_cnt; int mbx_sent; tANI_U32 bypass; // Used to various types of bypasses // in the driver /* * this table is initialized once for all by poldrv and so is not in * mac_param struct */ t_mac_block_table * mac_block_table; struct sk_buff_head mac_list; tANI_U32 magic_tail; } hdd_netdev_priv_t; static void ccm_callback(tHalHandle hHal, tANI_S32 cfgId) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); smsLog(pMac, LOGW, FL("cfgId = %d\n"), cfgId); } static void ccm_callback2(tHalHandle hHal, tANI_S32 result) { tpAniSirGlobal pMac = PMAC_STRUCT( hHal ); smsLog(pMac, LOGW, FL("result = %d\n"), result); } void ccm_test(void) { tHalHandle hHal ; tpAniSirGlobal pMac; eHalStatus status ; struct net_device *dev; hdd_netdev_priv_t *np; char str[80] = {1} ; dev = hdd_dev[0]; np = (hdd_netdev_priv_t *)dev->priv; hHal = np->halHandle ; pMac = PMAC_STRUCT( hHal ); smsLog(pMac, LOGW, "ccmStart()\n"); ccmStart(hHal) ; status = 
ccmCfgUpdate(hHal, ccm_callback2) ; smsLog(pMac, LOGW, "ccmCfgUpdate(): status=%d\n", status); status = ccmCfgSetInt(hHal, WNI_CFG_FRAGMENTATION_THRESHOLD, 100, ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetInt(WNI_CFG_FRAGMENTATION_THRESHOLD = %d): status=%d\n", WNI_CFG_FRAGMENTATION_THRESHOLD, status); status = ccmCfgSetInt(hHal, WNI_CFG_RTS_THRESHOLD, 100, ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetInt(WNI_CFG_RTS_THRESHOLD = %d): status=%d\n", WNI_CFG_RTS_THRESHOLD, status); /* this cfgid causes reload */ status = ccmCfgSetInt(hHal, WNI_CFG_MIMO_ENABLED, 1, ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetInt(WNI_CFG_MIMO_ENABLED = %d): status=%d\n", WNI_CFG_MIMO_ENABLED, status); status = ccmCfgSetInt(hHal, WNI_CFG_SHORT_RETRY_LIMIT, 100, ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetInt(WNI_CFG_SHORT_RETRY_LIMIT = %d): status=%d\n", WNI_CFG_SHORT_RETRY_LIMIT, status); status = ccmCfgSetInt(hHal, WNI_CFG_LONG_RETRY_LIMIT, 100, ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetInt(WNI_CFG_LONG_RETRY_LIMIT = %d): status=%d\n", WNI_CFG_LONG_RETRY_LIMIT, status); /* this cfgid causes restart */ status = ccmCfgSetStr(hHal, WNI_CFG_EDCA_WME_ACVI, str, sizeof(str), ccm_callback, 1) ; smsLog(pMac, LOGW, "ccmCfgSetStr(WNI_CFG_EDCA_WME_ACVI = %d): status=%d\n", WNI_CFG_EDCA_WME_ACVI, status); mdelay(100); smsLog(pMac, LOGW, "ccmStop()\n"); ccmStop(hHal); status = ccmCfgUpdate(hHal, ccm_callback2) ; smsLog(pMac, LOGW, "ccmCfgUpdate(): status=%d\n", status); smsLog(pMac, LOGW, "ccmStart()\n"); ccmStart(hHal) ; status = ccmCfgUpdate(hHal, ccm_callback2) ; smsLog(pMac, LOGW, "ccmCfgUpdate(): status=%d\n", status); } #endif
gpl-2.0
suryasingh/git
wt-status.c
3
43559
#include "cache.h" #include "pathspec.h" #include "wt-status.h" #include "object.h" #include "dir.h" #include "commit.h" #include "diff.h" #include "revision.h" #include "diffcore.h" #include "quote.h" #include "run-command.h" #include "argv-array.h" #include "remote.h" #include "refs.h" #include "submodule.h" #include "column.h" #include "strbuf.h" #include "utf8.h" static char cut_line[] = "------------------------ >8 ------------------------\n"; static char default_wt_status_colors[][COLOR_MAXLEN] = { GIT_COLOR_NORMAL, /* WT_STATUS_HEADER */ GIT_COLOR_GREEN, /* WT_STATUS_UPDATED */ GIT_COLOR_RED, /* WT_STATUS_CHANGED */ GIT_COLOR_RED, /* WT_STATUS_UNTRACKED */ GIT_COLOR_RED, /* WT_STATUS_NOBRANCH */ GIT_COLOR_RED, /* WT_STATUS_UNMERGED */ GIT_COLOR_GREEN, /* WT_STATUS_LOCAL_BRANCH */ GIT_COLOR_RED, /* WT_STATUS_REMOTE_BRANCH */ GIT_COLOR_NIL, /* WT_STATUS_ONBRANCH */ }; static const char *color(int slot, struct wt_status *s) { const char *c = ""; if (want_color(s->use_color)) c = s->color_palette[slot]; if (slot == WT_STATUS_ONBRANCH && color_is_nil(c)) c = s->color_palette[WT_STATUS_HEADER]; return c; } static void status_vprintf(struct wt_status *s, int at_bol, const char *color, const char *fmt, va_list ap, const char *trail) { struct strbuf sb = STRBUF_INIT; struct strbuf linebuf = STRBUF_INIT; const char *line, *eol; strbuf_vaddf(&sb, fmt, ap); if (!sb.len) { if (s->display_comment_prefix) { strbuf_addch(&sb, comment_line_char); if (!trail) strbuf_addch(&sb, ' '); } color_print_strbuf(s->fp, color, &sb); if (trail) fprintf(s->fp, "%s", trail); strbuf_release(&sb); return; } for (line = sb.buf; *line; line = eol + 1) { eol = strchr(line, '\n'); strbuf_reset(&linebuf); if (at_bol && s->display_comment_prefix) { strbuf_addch(&linebuf, comment_line_char); if (*line != '\n' && *line != '\t') strbuf_addch(&linebuf, ' '); } if (eol) strbuf_add(&linebuf, line, eol - line); else strbuf_addstr(&linebuf, line); color_print_strbuf(s->fp, color, &linebuf); if (eol) 
fprintf(s->fp, "\n"); else break; at_bol = 1; } if (trail) fprintf(s->fp, "%s", trail); strbuf_release(&linebuf); strbuf_release(&sb); } void status_printf_ln(struct wt_status *s, const char *color, const char *fmt, ...) { va_list ap; va_start(ap, fmt); status_vprintf(s, 1, color, fmt, ap, "\n"); va_end(ap); } void status_printf(struct wt_status *s, const char *color, const char *fmt, ...) { va_list ap; va_start(ap, fmt); status_vprintf(s, 1, color, fmt, ap, NULL); va_end(ap); } static void status_printf_more(struct wt_status *s, const char *color, const char *fmt, ...) { va_list ap; va_start(ap, fmt); status_vprintf(s, 0, color, fmt, ap, NULL); va_end(ap); } void wt_status_prepare(struct wt_status *s) { unsigned char sha1[20]; memset(s, 0, sizeof(*s)); memcpy(s->color_palette, default_wt_status_colors, sizeof(default_wt_status_colors)); s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES; s->use_color = -1; s->relative_paths = 1; s->branch = resolve_refdup("HEAD", sha1, 0, NULL); s->reference = "HEAD"; s->fp = stdout; s->index_file = get_index_file(); s->change.strdup_strings = 1; s->untracked.strdup_strings = 1; s->ignored.strdup_strings = 1; s->show_branch = -1; /* unspecified */ s->display_comment_prefix = 0; } static void wt_status_print_unmerged_header(struct wt_status *s) { int i; int del_mod_conflict = 0; int both_deleted = 0; int not_deleted = 0; const char *c = color(WT_STATUS_HEADER, s); status_printf_ln(s, c, _("Unmerged paths:")); for (i = 0; i < s->change.nr; i++) { struct string_list_item *it = &(s->change.items[i]); struct wt_status_change_data *d = it->util; switch (d->stagemask) { case 0: break; case 1: both_deleted = 1; break; case 3: case 5: del_mod_conflict = 1; break; default: not_deleted = 1; break; } } if (!s->hints) return; if (s->whence != FROM_COMMIT) ; else if (!s->is_initial) status_printf_ln(s, c, _(" (use \"git reset %s <file>...\" to unstage)"), s->reference); else status_printf_ln(s, c, _(" (use \"git rm --cached <file>...\" to 
unstage)")); if (!both_deleted) { if (!del_mod_conflict) status_printf_ln(s, c, _(" (use \"git add <file>...\" to mark resolution)")); else status_printf_ln(s, c, _(" (use \"git add/rm <file>...\" as appropriate to mark resolution)")); } else if (!del_mod_conflict && !not_deleted) { status_printf_ln(s, c, _(" (use \"git rm <file>...\" to mark resolution)")); } else { status_printf_ln(s, c, _(" (use \"git add/rm <file>...\" as appropriate to mark resolution)")); } status_printf_ln(s, c, ""); } static void wt_status_print_cached_header(struct wt_status *s) { const char *c = color(WT_STATUS_HEADER, s); status_printf_ln(s, c, _("Changes to be committed:")); if (!s->hints) return; if (s->whence != FROM_COMMIT) ; /* NEEDSWORK: use "git reset --unresolve"??? */ else if (!s->is_initial) status_printf_ln(s, c, _(" (use \"git reset %s <file>...\" to unstage)"), s->reference); else status_printf_ln(s, c, _(" (use \"git rm --cached <file>...\" to unstage)")); status_printf_ln(s, c, ""); } static void wt_status_print_dirty_header(struct wt_status *s, int has_deleted, int has_dirty_submodules) { const char *c = color(WT_STATUS_HEADER, s); status_printf_ln(s, c, _("Changes not staged for commit:")); if (!s->hints) return; if (!has_deleted) status_printf_ln(s, c, _(" (use \"git add <file>...\" to update what will be committed)")); else status_printf_ln(s, c, _(" (use \"git add/rm <file>...\" to update what will be committed)")); status_printf_ln(s, c, _(" (use \"git checkout -- <file>...\" to discard changes in working directory)")); if (has_dirty_submodules) status_printf_ln(s, c, _(" (commit or discard the untracked or modified content in submodules)")); status_printf_ln(s, c, ""); } static void wt_status_print_other_header(struct wt_status *s, const char *what, const char *how) { const char *c = color(WT_STATUS_HEADER, s); status_printf_ln(s, c, "%s:", what); if (!s->hints) return; status_printf_ln(s, c, _(" (use \"git %s <file>...\" to include in what will be committed)"), 
how); status_printf_ln(s, c, ""); } static void wt_status_print_trailer(struct wt_status *s) { status_printf_ln(s, color(WT_STATUS_HEADER, s), ""); } #define quote_path quote_path_relative static void wt_status_print_unmerged_data(struct wt_status *s, struct string_list_item *it) { const char *c = color(WT_STATUS_UNMERGED, s); struct wt_status_change_data *d = it->util; struct strbuf onebuf = STRBUF_INIT; const char *one, *how = _("bug"); one = quote_path(it->string, s->prefix, &onebuf); status_printf(s, color(WT_STATUS_HEADER, s), "\t"); switch (d->stagemask) { case 1: how = _("both deleted:"); break; case 2: how = _("added by us:"); break; case 3: how = _("deleted by them:"); break; case 4: how = _("added by them:"); break; case 5: how = _("deleted by us:"); break; case 6: how = _("both added:"); break; case 7: how = _("both modified:"); break; } status_printf_more(s, c, "%-20s%s\n", how, one); strbuf_release(&onebuf); } static const char *wt_status_diff_status_string(int status) { switch (status) { case DIFF_STATUS_ADDED: return _("new file"); case DIFF_STATUS_COPIED: return _("copied"); case DIFF_STATUS_DELETED: return _("deleted"); case DIFF_STATUS_MODIFIED: return _("modified"); case DIFF_STATUS_RENAMED: return _("renamed"); case DIFF_STATUS_TYPE_CHANGED: return _("typechange"); case DIFF_STATUS_UNKNOWN: return _("unknown"); case DIFF_STATUS_UNMERGED: return _("unmerged"); default: return NULL; } } static void wt_status_print_change_data(struct wt_status *s, int change_type, struct string_list_item *it) { struct wt_status_change_data *d = it->util; const char *c = color(change_type, s); int status; char *one_name; char *two_name; const char *one, *two; struct strbuf onebuf = STRBUF_INIT, twobuf = STRBUF_INIT; struct strbuf extra = STRBUF_INIT; static char *padding; const char *what; int len; if (!padding) { int width = 0; /* If DIFF_STATUS_* uses outside this range, we're in trouble */ for (status = 'A'; status <= 'Z'; status++) { what = 
wt_status_diff_status_string(status); len = what ? strlen(what) : 0; if (len > width) width = len; } width += 2; /* colon and a space */ padding = xmallocz(width); memset(padding, ' ', width); } one_name = two_name = it->string; switch (change_type) { case WT_STATUS_UPDATED: status = d->index_status; if (d->head_path) one_name = d->head_path; break; case WT_STATUS_CHANGED: if (d->new_submodule_commits || d->dirty_submodule) { strbuf_addstr(&extra, " ("); if (d->new_submodule_commits) strbuf_addf(&extra, _("new commits, ")); if (d->dirty_submodule & DIRTY_SUBMODULE_MODIFIED) strbuf_addf(&extra, _("modified content, ")); if (d->dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) strbuf_addf(&extra, _("untracked content, ")); strbuf_setlen(&extra, extra.len - 2); strbuf_addch(&extra, ')'); } status = d->worktree_status; break; default: die("BUG: unhandled change_type %d in wt_status_print_change_data", change_type); } one = quote_path(one_name, s->prefix, &onebuf); two = quote_path(two_name, s->prefix, &twobuf); status_printf(s, color(WT_STATUS_HEADER, s), "\t"); what = wt_status_diff_status_string(status); if (!what) die(_("bug: unhandled diff status %c"), status); /* 1 for colon, which is not part of "what" */ len = strlen(padding) - (utf8_strwidth(what) + 1); assert(len >= 0); if (status == DIFF_STATUS_COPIED || status == DIFF_STATUS_RENAMED) status_printf_more(s, c, "%s:%.*s%s -> %s", what, len, padding, one, two); else status_printf_more(s, c, "%s:%.*s%s", what, len, padding, one); if (extra.len) { status_printf_more(s, color(WT_STATUS_HEADER, s), "%s", extra.buf); strbuf_release(&extra); } status_printf_more(s, GIT_COLOR_NORMAL, "\n"); strbuf_release(&onebuf); strbuf_release(&twobuf); } static void wt_status_collect_changed_cb(struct diff_queue_struct *q, struct diff_options *options, void *data) { struct wt_status *s = data; int i; if (!q->nr) return; s->workdir_dirty = 1; for (i = 0; i < q->nr; i++) { struct diff_filepair *p; struct string_list_item *it; struct 
wt_status_change_data *d; p = q->queue[i]; it = string_list_insert(&s->change, p->one->path); d = it->util; if (!d) { d = xcalloc(1, sizeof(*d)); it->util = d; } if (!d->worktree_status) d->worktree_status = p->status; d->dirty_submodule = p->two->dirty_submodule; if (S_ISGITLINK(p->two->mode)) d->new_submodule_commits = !!hashcmp(p->one->sha1, p->two->sha1); } } static int unmerged_mask(const char *path) { int pos, mask; const struct cache_entry *ce; pos = cache_name_pos(path, strlen(path)); if (0 <= pos) return 0; mask = 0; pos = -pos-1; while (pos < active_nr) { ce = active_cache[pos++]; if (strcmp(ce->name, path) || !ce_stage(ce)) break; mask |= (1 << (ce_stage(ce) - 1)); } return mask; } static void wt_status_collect_updated_cb(struct diff_queue_struct *q, struct diff_options *options, void *data) { struct wt_status *s = data; int i; for (i = 0; i < q->nr; i++) { struct diff_filepair *p; struct string_list_item *it; struct wt_status_change_data *d; p = q->queue[i]; it = string_list_insert(&s->change, p->two->path); d = it->util; if (!d) { d = xcalloc(1, sizeof(*d)); it->util = d; } if (!d->index_status) d->index_status = p->status; switch (p->status) { case DIFF_STATUS_COPIED: case DIFF_STATUS_RENAMED: d->head_path = xstrdup(p->one->path); break; case DIFF_STATUS_UNMERGED: d->stagemask = unmerged_mask(p->two->path); break; } } } static void wt_status_collect_changes_worktree(struct wt_status *s) { struct rev_info rev; init_revisions(&rev, NULL); setup_revisions(0, NULL, &rev, NULL); rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK; DIFF_OPT_SET(&rev.diffopt, DIRTY_SUBMODULES); if (!s->show_untracked_files) DIFF_OPT_SET(&rev.diffopt, IGNORE_UNTRACKED_IN_SUBMODULES); if (s->ignore_submodule_arg) { DIFF_OPT_SET(&rev.diffopt, OVERRIDE_SUBMODULE_CONFIG); handle_ignore_submodules_arg(&rev.diffopt, s->ignore_submodule_arg); } rev.diffopt.format_callback = wt_status_collect_changed_cb; rev.diffopt.format_callback_data = s; copy_pathspec(&rev.prune_data, 
&s->pathspec); run_diff_files(&rev, 0); } static void wt_status_collect_changes_index(struct wt_status *s) { struct rev_info rev; struct setup_revision_opt opt; init_revisions(&rev, NULL); memset(&opt, 0, sizeof(opt)); opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference; setup_revisions(0, NULL, &rev, &opt); if (s->ignore_submodule_arg) { DIFF_OPT_SET(&rev.diffopt, OVERRIDE_SUBMODULE_CONFIG); handle_ignore_submodules_arg(&rev.diffopt, s->ignore_submodule_arg); } rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK; rev.diffopt.format_callback = wt_status_collect_updated_cb; rev.diffopt.format_callback_data = s; rev.diffopt.detect_rename = 1; rev.diffopt.rename_limit = 200; rev.diffopt.break_opt = 0; copy_pathspec(&rev.prune_data, &s->pathspec); run_diff_index(&rev, 1); } static void wt_status_collect_changes_initial(struct wt_status *s) { int i; for (i = 0; i < active_nr; i++) { struct string_list_item *it; struct wt_status_change_data *d; const struct cache_entry *ce = active_cache[i]; if (!ce_path_match(ce, &s->pathspec, NULL)) continue; it = string_list_insert(&s->change, ce->name); d = it->util; if (!d) { d = xcalloc(1, sizeof(*d)); it->util = d; } if (ce_stage(ce)) { d->index_status = DIFF_STATUS_UNMERGED; d->stagemask |= (1 << (ce_stage(ce) - 1)); } else d->index_status = DIFF_STATUS_ADDED; } } static void wt_status_collect_untracked(struct wt_status *s) { int i; struct dir_struct dir; struct timeval t_begin; if (!s->show_untracked_files) return; if (advice_status_u_option) gettimeofday(&t_begin, NULL); memset(&dir, 0, sizeof(dir)); if (s->show_untracked_files != SHOW_ALL_UNTRACKED_FILES) dir.flags |= DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES; if (s->show_ignored_files) dir.flags |= DIR_SHOW_IGNORED_TOO; setup_standard_excludes(&dir); fill_directory(&dir, &s->pathspec); for (i = 0; i < dir.nr; i++) { struct dir_entry *ent = dir.entries[i]; if (cache_name_is_other(ent->name, ent->len) && dir_path_match(ent, &s->pathspec, 0, NULL)) 
string_list_insert(&s->untracked, ent->name); free(ent); } for (i = 0; i < dir.ignored_nr; i++) { struct dir_entry *ent = dir.ignored[i]; if (cache_name_is_other(ent->name, ent->len) && dir_path_match(ent, &s->pathspec, 0, NULL)) string_list_insert(&s->ignored, ent->name); free(ent); } free(dir.entries); free(dir.ignored); clear_directory(&dir); if (advice_status_u_option) { struct timeval t_end; gettimeofday(&t_end, NULL); s->untracked_in_ms = (uint64_t)t_end.tv_sec * 1000 + t_end.tv_usec / 1000 - ((uint64_t)t_begin.tv_sec * 1000 + t_begin.tv_usec / 1000); } } void wt_status_collect(struct wt_status *s) { wt_status_collect_changes_worktree(s); if (s->is_initial) wt_status_collect_changes_initial(s); else wt_status_collect_changes_index(s); wt_status_collect_untracked(s); } static void wt_status_print_unmerged(struct wt_status *s) { int shown_header = 0; int i; for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; struct string_list_item *it; it = &(s->change.items[i]); d = it->util; if (!d->stagemask) continue; if (!shown_header) { wt_status_print_unmerged_header(s); shown_header = 1; } wt_status_print_unmerged_data(s, it); } if (shown_header) wt_status_print_trailer(s); } static void wt_status_print_updated(struct wt_status *s) { int shown_header = 0; int i; for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; struct string_list_item *it; it = &(s->change.items[i]); d = it->util; if (!d->index_status || d->index_status == DIFF_STATUS_UNMERGED) continue; if (!shown_header) { wt_status_print_cached_header(s); s->commitable = 1; shown_header = 1; } wt_status_print_change_data(s, WT_STATUS_UPDATED, it); } if (shown_header) wt_status_print_trailer(s); } /* * -1 : has delete * 0 : no change * 1 : some change but no delete */ static int wt_status_check_worktree_changes(struct wt_status *s, int *dirty_submodules) { int i; int changes = 0; *dirty_submodules = 0; for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; d = 
s->change.items[i].util; if (!d->worktree_status || d->worktree_status == DIFF_STATUS_UNMERGED) continue; if (!changes) changes = 1; if (d->dirty_submodule) *dirty_submodules = 1; if (d->worktree_status == DIFF_STATUS_DELETED) changes = -1; } return changes; } static void wt_status_print_changed(struct wt_status *s) { int i, dirty_submodules; int worktree_changes = wt_status_check_worktree_changes(s, &dirty_submodules); if (!worktree_changes) return; wt_status_print_dirty_header(s, worktree_changes < 0, dirty_submodules); for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; struct string_list_item *it; it = &(s->change.items[i]); d = it->util; if (!d->worktree_status || d->worktree_status == DIFF_STATUS_UNMERGED) continue; wt_status_print_change_data(s, WT_STATUS_CHANGED, it); } wt_status_print_trailer(s); } static void wt_status_print_submodule_summary(struct wt_status *s, int uncommitted) { struct child_process sm_summary; char summary_limit[64]; char index[PATH_MAX]; const char *env[] = { NULL, NULL }; struct argv_array argv = ARGV_ARRAY_INIT; struct strbuf cmd_stdout = STRBUF_INIT; struct strbuf summary = STRBUF_INIT; char *summary_content; size_t len; sprintf(summary_limit, "%d", s->submodule_summary); snprintf(index, sizeof(index), "GIT_INDEX_FILE=%s", s->index_file); env[0] = index; argv_array_push(&argv, "submodule"); argv_array_push(&argv, "summary"); argv_array_push(&argv, uncommitted ? "--files" : "--cached"); argv_array_push(&argv, "--for-status"); argv_array_push(&argv, "--summary-limit"); argv_array_push(&argv, summary_limit); if (!uncommitted) argv_array_push(&argv, s->amend ? 
"HEAD^" : "HEAD"); memset(&sm_summary, 0, sizeof(sm_summary)); sm_summary.argv = argv.argv; sm_summary.env = env; sm_summary.git_cmd = 1; sm_summary.no_stdin = 1; fflush(s->fp); sm_summary.out = -1; run_command(&sm_summary); argv_array_clear(&argv); len = strbuf_read(&cmd_stdout, sm_summary.out, 1024); /* prepend header, only if there's an actual output */ if (len) { if (uncommitted) strbuf_addstr(&summary, _("Submodules changed but not updated:")); else strbuf_addstr(&summary, _("Submodule changes to be committed:")); strbuf_addstr(&summary, "\n\n"); } strbuf_addbuf(&summary, &cmd_stdout); strbuf_release(&cmd_stdout); if (s->display_comment_prefix) { summary_content = strbuf_detach(&summary, &len); strbuf_add_commented_lines(&summary, summary_content, len); free(summary_content); } fputs(summary.buf, s->fp); strbuf_release(&summary); } static void wt_status_print_other(struct wt_status *s, struct string_list *l, const char *what, const char *how) { int i; struct strbuf buf = STRBUF_INIT; static struct string_list output = STRING_LIST_INIT_DUP; struct column_options copts; if (!l->nr) return; wt_status_print_other_header(s, what, how); for (i = 0; i < l->nr; i++) { struct string_list_item *it; const char *path; it = &(l->items[i]); path = quote_path(it->string, s->prefix, &buf); if (column_active(s->colopts)) { string_list_append(&output, path); continue; } status_printf(s, color(WT_STATUS_HEADER, s), "\t"); status_printf_more(s, color(WT_STATUS_UNTRACKED, s), "%s\n", path); } strbuf_release(&buf); if (!column_active(s->colopts)) goto conclude; strbuf_addf(&buf, "%s%s\t%s", color(WT_STATUS_HEADER, s), s->display_comment_prefix ? 
"#" : "", color(WT_STATUS_UNTRACKED, s)); memset(&copts, 0, sizeof(copts)); copts.padding = 1; copts.indent = buf.buf; if (want_color(s->use_color)) copts.nl = GIT_COLOR_RESET "\n"; print_columns(&output, s->colopts, &copts); string_list_clear(&output, 0); strbuf_release(&buf); conclude: status_printf_ln(s, GIT_COLOR_NORMAL, ""); } void wt_status_truncate_message_at_cut_line(struct strbuf *buf) { const char *p; struct strbuf pattern = STRBUF_INIT; strbuf_addf(&pattern, "%c %s", comment_line_char, cut_line); p = strstr(buf->buf, pattern.buf); if (p && (p == buf->buf || p[-1] == '\n')) strbuf_setlen(buf, p - buf->buf); strbuf_release(&pattern); } static void wt_status_print_verbose(struct wt_status *s) { struct rev_info rev; struct setup_revision_opt opt; init_revisions(&rev, NULL); DIFF_OPT_SET(&rev.diffopt, ALLOW_TEXTCONV); memset(&opt, 0, sizeof(opt)); opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference; setup_revisions(0, NULL, &rev, &opt); rev.diffopt.output_format |= DIFF_FORMAT_PATCH; rev.diffopt.detect_rename = 1; rev.diffopt.file = s->fp; rev.diffopt.close_file = 0; /* * If we're not going to stdout, then we definitely don't * want color, since we are going to the commit message * file (and even the "auto" setting won't work, since it * will have checked isatty on stdout). But we then do want * to insert the scissor line here to reliably remove the * diff before committing. 
*/ if (s->fp != stdout) { const char *explanation = _("Do not touch the line above.\nEverything below will be removed."); struct strbuf buf = STRBUF_INIT; rev.diffopt.use_color = 0; fprintf(s->fp, "%c %s", comment_line_char, cut_line); strbuf_add_commented_lines(&buf, explanation, strlen(explanation)); fputs(buf.buf, s->fp); strbuf_release(&buf); } run_diff_index(&rev, 1); } static void wt_status_print_tracking(struct wt_status *s) { struct strbuf sb = STRBUF_INIT; const char *cp, *ep; struct branch *branch; char comment_line_string[3]; int i; assert(s->branch && !s->is_initial); if (!starts_with(s->branch, "refs/heads/")) return; branch = branch_get(s->branch + 11); if (!format_tracking_info(branch, &sb)) return; i = 0; if (s->display_comment_prefix) { comment_line_string[i++] = comment_line_char; comment_line_string[i++] = ' '; } comment_line_string[i] = '\0'; for (cp = sb.buf; (ep = strchr(cp, '\n')) != NULL; cp = ep + 1) color_fprintf_ln(s->fp, color(WT_STATUS_HEADER, s), "%s%.*s", comment_line_string, (int)(ep - cp), cp); if (s->display_comment_prefix) color_fprintf_ln(s->fp, color(WT_STATUS_HEADER, s), "%c", comment_line_char); else fprintf_ln(s->fp, ""); } static int has_unmerged(struct wt_status *s) { int i; for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; d = s->change.items[i].util; if (d->stagemask) return 1; } return 0; } static void show_merge_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { if (has_unmerged(s)) { status_printf_ln(s, color, _("You have unmerged paths.")); if (s->hints) status_printf_ln(s, color, _(" (fix conflicts and run \"git commit\")")); } else { status_printf_ln(s, color, _("All conflicts fixed but you are still merging.")); if (s->hints) status_printf_ln(s, color, _(" (use \"git commit\" to conclude merge)")); } wt_status_print_trailer(s); } static void show_am_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { status_printf_ln(s, color, 
_("You are in the middle of an am session.")); if (state->am_empty_patch) status_printf_ln(s, color, _("The current patch is empty.")); if (s->hints) { if (!state->am_empty_patch) status_printf_ln(s, color, _(" (fix conflicts and then run \"git am --continue\")")); status_printf_ln(s, color, _(" (use \"git am --skip\" to skip this patch)")); status_printf_ln(s, color, _(" (use \"git am --abort\" to restore the original branch)")); } wt_status_print_trailer(s); } static char *read_line_from_git_path(const char *filename) { struct strbuf buf = STRBUF_INIT; FILE *fp = fopen(git_path("%s", filename), "r"); if (!fp) { strbuf_release(&buf); return NULL; } strbuf_getline(&buf, fp, '\n'); if (!fclose(fp)) { return strbuf_detach(&buf, NULL); } else { strbuf_release(&buf); return NULL; } } static int split_commit_in_progress(struct wt_status *s) { int split_in_progress = 0; char *head = read_line_from_git_path("HEAD"); char *orig_head = read_line_from_git_path("ORIG_HEAD"); char *rebase_amend = read_line_from_git_path("rebase-merge/amend"); char *rebase_orig_head = read_line_from_git_path("rebase-merge/orig-head"); if (!head || !orig_head || !rebase_amend || !rebase_orig_head || !s->branch || strcmp(s->branch, "HEAD")) return split_in_progress; if (!strcmp(rebase_amend, rebase_orig_head)) { if (strcmp(head, rebase_amend)) split_in_progress = 1; } else if (strcmp(orig_head, rebase_orig_head)) { split_in_progress = 1; } if (!s->amend && !s->nowarn && !s->workdir_dirty) split_in_progress = 0; free(head); free(orig_head); free(rebase_amend); free(rebase_orig_head); return split_in_progress; } static void show_rebase_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { struct stat st; if (has_unmerged(s)) { if (state->branch) status_printf_ln(s, color, _("You are currently rebasing branch '%s' on '%s'."), state->branch, state->onto); else status_printf_ln(s, color, _("You are currently rebasing.")); if (s->hints) { status_printf_ln(s, color, _(" 
(fix conflicts and then run \"git rebase --continue\")")); status_printf_ln(s, color, _(" (use \"git rebase --skip\" to skip this patch)")); status_printf_ln(s, color, _(" (use \"git rebase --abort\" to check out the original branch)")); } } else if (state->rebase_in_progress || !stat(git_path("MERGE_MSG"), &st)) { if (state->branch) status_printf_ln(s, color, _("You are currently rebasing branch '%s' on '%s'."), state->branch, state->onto); else status_printf_ln(s, color, _("You are currently rebasing.")); if (s->hints) status_printf_ln(s, color, _(" (all conflicts fixed: run \"git rebase --continue\")")); } else if (split_commit_in_progress(s)) { if (state->branch) status_printf_ln(s, color, _("You are currently splitting a commit while rebasing branch '%s' on '%s'."), state->branch, state->onto); else status_printf_ln(s, color, _("You are currently splitting a commit during a rebase.")); if (s->hints) status_printf_ln(s, color, _(" (Once your working directory is clean, run \"git rebase --continue\")")); } else { if (state->branch) status_printf_ln(s, color, _("You are currently editing a commit while rebasing branch '%s' on '%s'."), state->branch, state->onto); else status_printf_ln(s, color, _("You are currently editing a commit during a rebase.")); if (s->hints && !s->amend) { status_printf_ln(s, color, _(" (use \"git commit --amend\" to amend the current commit)")); status_printf_ln(s, color, _(" (use \"git rebase --continue\" once you are satisfied with your changes)")); } } wt_status_print_trailer(s); } static void show_cherry_pick_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { status_printf_ln(s, color, _("You are currently cherry-picking commit %s."), find_unique_abbrev(state->cherry_pick_head_sha1, DEFAULT_ABBREV)); if (s->hints) { if (has_unmerged(s)) status_printf_ln(s, color, _(" (fix conflicts and run \"git cherry-pick --continue\")")); else status_printf_ln(s, color, _(" (all conflicts fixed: run \"git 
cherry-pick --continue\")")); status_printf_ln(s, color, _(" (use \"git cherry-pick --abort\" to cancel the cherry-pick operation)")); } wt_status_print_trailer(s); } static void show_revert_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { status_printf_ln(s, color, _("You are currently reverting commit %s."), find_unique_abbrev(state->revert_head_sha1, DEFAULT_ABBREV)); if (s->hints) { if (has_unmerged(s)) status_printf_ln(s, color, _(" (fix conflicts and run \"git revert --continue\")")); else status_printf_ln(s, color, _(" (all conflicts fixed: run \"git revert --continue\")")); status_printf_ln(s, color, _(" (use \"git revert --abort\" to cancel the revert operation)")); } wt_status_print_trailer(s); } static void show_bisect_in_progress(struct wt_status *s, struct wt_status_state *state, const char *color) { if (state->branch) status_printf_ln(s, color, _("You are currently bisecting, started from branch '%s'."), state->branch); else status_printf_ln(s, color, _("You are currently bisecting.")); if (s->hints) status_printf_ln(s, color, _(" (use \"git bisect reset\" to get back to the original branch)")); wt_status_print_trailer(s); } /* * Extract branch information from rebase/bisect */ static char *read_and_strip_branch(const char *path) { struct strbuf sb = STRBUF_INIT; unsigned char sha1[20]; if (strbuf_read_file(&sb, git_path("%s", path), 0) <= 0) goto got_nothing; while (&sb.len && sb.buf[sb.len - 1] == '\n') strbuf_setlen(&sb, sb.len - 1); if (!sb.len) goto got_nothing; if (starts_with(sb.buf, "refs/heads/")) strbuf_remove(&sb,0, strlen("refs/heads/")); else if (starts_with(sb.buf, "refs/")) ; else if (!get_sha1_hex(sb.buf, sha1)) { const char *abbrev; abbrev = find_unique_abbrev(sha1, DEFAULT_ABBREV); strbuf_reset(&sb); strbuf_addstr(&sb, abbrev); } else if (!strcmp(sb.buf, "detached HEAD")) /* rebase */ goto got_nothing; else /* bisect */ ; return strbuf_detach(&sb, NULL); got_nothing: strbuf_release(&sb); return 
NULL; } struct grab_1st_switch_cbdata { struct strbuf buf; unsigned char nsha1[20]; }; static int grab_1st_switch(unsigned char *osha1, unsigned char *nsha1, const char *email, unsigned long timestamp, int tz, const char *message, void *cb_data) { struct grab_1st_switch_cbdata *cb = cb_data; const char *target = NULL, *end; if (!starts_with(message, "checkout: moving from ")) return 0; message += strlen("checkout: moving from "); target = strstr(message, " to "); if (!target) return 0; target += strlen(" to "); strbuf_reset(&cb->buf); hashcpy(cb->nsha1, nsha1); for (end = target; *end && *end != '\n'; end++) ; strbuf_add(&cb->buf, target, end - target); return 1; } static void wt_status_get_detached_from(struct wt_status_state *state) { struct grab_1st_switch_cbdata cb; struct commit *commit; unsigned char sha1[20]; char *ref = NULL; strbuf_init(&cb.buf, 0); if (for_each_reflog_ent_reverse("HEAD", grab_1st_switch, &cb) <= 0) { strbuf_release(&cb.buf); return; } if (dwim_ref(cb.buf.buf, cb.buf.len, sha1, &ref) == 1 && /* sha1 is a commit? 
match without further lookup */ (!hashcmp(cb.nsha1, sha1) || /* perhaps sha1 is a tag, try to dereference to a commit */ ((commit = lookup_commit_reference_gently(sha1, 1)) != NULL && !hashcmp(cb.nsha1, commit->object.sha1)))) { int ofs; if (starts_with(ref, "refs/tags/")) ofs = strlen("refs/tags/"); else if (starts_with(ref, "refs/remotes/")) ofs = strlen("refs/remotes/"); else ofs = 0; state->detached_from = xstrdup(ref + ofs); } else state->detached_from = xstrdup(find_unique_abbrev(cb.nsha1, DEFAULT_ABBREV)); hashcpy(state->detached_sha1, cb.nsha1); free(ref); strbuf_release(&cb.buf); } void wt_status_get_state(struct wt_status_state *state, int get_detached_from) { struct stat st; unsigned char sha1[20]; if (!stat(git_path("MERGE_HEAD"), &st)) { state->merge_in_progress = 1; } else if (!stat(git_path("rebase-apply"), &st)) { if (!stat(git_path("rebase-apply/applying"), &st)) { state->am_in_progress = 1; if (!stat(git_path("rebase-apply/patch"), &st) && !st.st_size) state->am_empty_patch = 1; } else { state->rebase_in_progress = 1; state->branch = read_and_strip_branch("rebase-apply/head-name"); state->onto = read_and_strip_branch("rebase-apply/onto"); } } else if (!stat(git_path("rebase-merge"), &st)) { if (!stat(git_path("rebase-merge/interactive"), &st)) state->rebase_interactive_in_progress = 1; else state->rebase_in_progress = 1; state->branch = read_and_strip_branch("rebase-merge/head-name"); state->onto = read_and_strip_branch("rebase-merge/onto"); } else if (!stat(git_path("CHERRY_PICK_HEAD"), &st) && !get_sha1("CHERRY_PICK_HEAD", sha1)) { state->cherry_pick_in_progress = 1; hashcpy(state->cherry_pick_head_sha1, sha1); } if (!stat(git_path("BISECT_LOG"), &st)) { state->bisect_in_progress = 1; state->branch = read_and_strip_branch("BISECT_START"); } if (!stat(git_path("REVERT_HEAD"), &st) && !get_sha1("REVERT_HEAD", sha1)) { state->revert_in_progress = 1; hashcpy(state->revert_head_sha1, sha1); } if (get_detached_from) wt_status_get_detached_from(state); 
} static void wt_status_print_state(struct wt_status *s, struct wt_status_state *state) { const char *state_color = color(WT_STATUS_HEADER, s); if (state->merge_in_progress) show_merge_in_progress(s, state, state_color); else if (state->am_in_progress) show_am_in_progress(s, state, state_color); else if (state->rebase_in_progress || state->rebase_interactive_in_progress) show_rebase_in_progress(s, state, state_color); else if (state->cherry_pick_in_progress) show_cherry_pick_in_progress(s, state, state_color); else if (state->revert_in_progress) show_revert_in_progress(s, state, state_color); if (state->bisect_in_progress) show_bisect_in_progress(s, state, state_color); } void wt_status_print(struct wt_status *s) { const char *branch_color = color(WT_STATUS_ONBRANCH, s); const char *branch_status_color = color(WT_STATUS_HEADER, s); struct wt_status_state state; memset(&state, 0, sizeof(state)); wt_status_get_state(&state, s->branch && !strcmp(s->branch, "HEAD")); if (s->branch) { const char *on_what = _("On branch "); const char *branch_name = s->branch; if (starts_with(branch_name, "refs/heads/")) branch_name += 11; else if (!strcmp(branch_name, "HEAD")) { branch_status_color = color(WT_STATUS_NOBRANCH, s); if (state.rebase_in_progress || state.rebase_interactive_in_progress) { on_what = _("rebase in progress; onto "); branch_name = state.onto; } else if (state.detached_from) { unsigned char sha1[20]; branch_name = state.detached_from; if (!get_sha1("HEAD", sha1) && !hashcmp(sha1, state.detached_sha1)) on_what = _("HEAD detached at "); else on_what = _("HEAD detached from "); } else { branch_name = ""; on_what = _("Not currently on any branch."); } } status_printf(s, color(WT_STATUS_HEADER, s), ""); status_printf_more(s, branch_status_color, "%s", on_what); status_printf_more(s, branch_color, "%s\n", branch_name); if (!s->is_initial) wt_status_print_tracking(s); } wt_status_print_state(s, &state); free(state.branch); free(state.onto); free(state.detached_from); if 
(s->is_initial) { status_printf_ln(s, color(WT_STATUS_HEADER, s), ""); status_printf_ln(s, color(WT_STATUS_HEADER, s), _("Initial commit")); status_printf_ln(s, color(WT_STATUS_HEADER, s), ""); } wt_status_print_updated(s); wt_status_print_unmerged(s); wt_status_print_changed(s); if (s->submodule_summary && (!s->ignore_submodule_arg || strcmp(s->ignore_submodule_arg, "all"))) { wt_status_print_submodule_summary(s, 0); /* staged */ wt_status_print_submodule_summary(s, 1); /* unstaged */ } if (s->show_untracked_files) { wt_status_print_other(s, &s->untracked, _("Untracked files"), "add"); if (s->show_ignored_files) wt_status_print_other(s, &s->ignored, _("Ignored files"), "add -f"); if (advice_status_u_option && 2000 < s->untracked_in_ms) { status_printf_ln(s, GIT_COLOR_NORMAL, ""); status_printf_ln(s, GIT_COLOR_NORMAL, _("It took %.2f seconds to enumerate untracked files. 'status -uno'\n" "may speed it up, but you have to be careful not to forget to add\n" "new files yourself (see 'git help status')."), s->untracked_in_ms / 1000.0); } } else if (s->commitable) status_printf_ln(s, GIT_COLOR_NORMAL, _("Untracked files not listed%s"), s->hints ? 
_(" (use -u option to show untracked files)") : ""); if (s->verbose) wt_status_print_verbose(s); if (!s->commitable) { if (s->amend) status_printf_ln(s, GIT_COLOR_NORMAL, _("No changes")); else if (s->nowarn) ; /* nothing */ else if (s->workdir_dirty) { if (s->hints) printf(_("no changes added to commit " "(use \"git add\" and/or \"git commit -a\")\n")); else printf(_("no changes added to commit\n")); } else if (s->untracked.nr) { if (s->hints) printf(_("nothing added to commit but untracked files " "present (use \"git add\" to track)\n")); else printf(_("nothing added to commit but untracked files present\n")); } else if (s->is_initial) { if (s->hints) printf(_("nothing to commit (create/copy files " "and use \"git add\" to track)\n")); else printf(_("nothing to commit\n")); } else if (!s->show_untracked_files) { if (s->hints) printf(_("nothing to commit (use -u to show untracked files)\n")); else printf(_("nothing to commit\n")); } else printf(_("nothing to commit, working directory clean\n")); } } static void wt_shortstatus_unmerged(struct string_list_item *it, struct wt_status *s) { struct wt_status_change_data *d = it->util; const char *how = "??"; switch (d->stagemask) { case 1: how = "DD"; break; /* both deleted */ case 2: how = "AU"; break; /* added by us */ case 3: how = "UD"; break; /* deleted by them */ case 4: how = "UA"; break; /* added by them */ case 5: how = "DU"; break; /* deleted by us */ case 6: how = "AA"; break; /* both added */ case 7: how = "UU"; break; /* both modified */ } color_fprintf(s->fp, color(WT_STATUS_UNMERGED, s), "%s", how); if (s->null_termination) { fprintf(stdout, " %s%c", it->string, 0); } else { struct strbuf onebuf = STRBUF_INIT; const char *one; one = quote_path(it->string, s->prefix, &onebuf); printf(" %s\n", one); strbuf_release(&onebuf); } } static void wt_shortstatus_status(struct string_list_item *it, struct wt_status *s) { struct wt_status_change_data *d = it->util; if (d->index_status) color_fprintf(s->fp, 
color(WT_STATUS_UPDATED, s), "%c", d->index_status); else putchar(' '); if (d->worktree_status) color_fprintf(s->fp, color(WT_STATUS_CHANGED, s), "%c", d->worktree_status); else putchar(' '); putchar(' '); if (s->null_termination) { fprintf(stdout, "%s%c", it->string, 0); if (d->head_path) fprintf(stdout, "%s%c", d->head_path, 0); } else { struct strbuf onebuf = STRBUF_INIT; const char *one; if (d->head_path) { one = quote_path(d->head_path, s->prefix, &onebuf); if (*one != '"' && strchr(one, ' ') != NULL) { putchar('"'); strbuf_addch(&onebuf, '"'); one = onebuf.buf; } printf("%s -> ", one); strbuf_release(&onebuf); } one = quote_path(it->string, s->prefix, &onebuf); if (*one != '"' && strchr(one, ' ') != NULL) { putchar('"'); strbuf_addch(&onebuf, '"'); one = onebuf.buf; } printf("%s\n", one); strbuf_release(&onebuf); } } static void wt_shortstatus_other(struct string_list_item *it, struct wt_status *s, const char *sign) { if (s->null_termination) { fprintf(stdout, "%s %s%c", sign, it->string, 0); } else { struct strbuf onebuf = STRBUF_INIT; const char *one; one = quote_path(it->string, s->prefix, &onebuf); color_fprintf(s->fp, color(WT_STATUS_UNTRACKED, s), "%s", sign); printf(" %s\n", one); strbuf_release(&onebuf); } } static void wt_shortstatus_print_tracking(struct wt_status *s) { struct branch *branch; const char *header_color = color(WT_STATUS_HEADER, s); const char *branch_color_local = color(WT_STATUS_LOCAL_BRANCH, s); const char *branch_color_remote = color(WT_STATUS_REMOTE_BRANCH, s); const char *base; const char *branch_name; int num_ours, num_theirs; int upstream_is_gone = 0; color_fprintf(s->fp, color(WT_STATUS_HEADER, s), "## "); if (!s->branch) return; branch_name = s->branch; if (starts_with(branch_name, "refs/heads/")) branch_name += 11; else if (!strcmp(branch_name, "HEAD")) { branch_name = _("HEAD (no branch)"); branch_color_local = color(WT_STATUS_NOBRANCH, s); } branch = branch_get(s->branch + 11); if (s->is_initial) color_fprintf(s->fp, 
header_color, _("Initial commit on ")); color_fprintf(s->fp, branch_color_local, "%s", branch_name); switch (stat_tracking_info(branch, &num_ours, &num_theirs)) { case 0: /* no base */ fputc(s->null_termination ? '\0' : '\n', s->fp); return; case -1: /* with "gone" base */ upstream_is_gone = 1; break; default: /* with base */ break; } base = branch->merge[0]->dst; base = shorten_unambiguous_ref(base, 0); color_fprintf(s->fp, header_color, "..."); color_fprintf(s->fp, branch_color_remote, "%s", base); if (!upstream_is_gone && !num_ours && !num_theirs) { fputc(s->null_termination ? '\0' : '\n', s->fp); return; } color_fprintf(s->fp, header_color, " ["); if (upstream_is_gone) { color_fprintf(s->fp, header_color, _("gone")); } else if (!num_ours) { color_fprintf(s->fp, header_color, _("behind ")); color_fprintf(s->fp, branch_color_remote, "%d", num_theirs); } else if (!num_theirs) { color_fprintf(s->fp, header_color, _("ahead ")); color_fprintf(s->fp, branch_color_local, "%d", num_ours); } else { color_fprintf(s->fp, header_color, _("ahead ")); color_fprintf(s->fp, branch_color_local, "%d", num_ours); color_fprintf(s->fp, header_color, _(", behind ")); color_fprintf(s->fp, branch_color_remote, "%d", num_theirs); } color_fprintf(s->fp, header_color, "]"); fputc(s->null_termination ? 
'\0' : '\n', s->fp); } void wt_shortstatus_print(struct wt_status *s) { int i; if (s->show_branch) wt_shortstatus_print_tracking(s); for (i = 0; i < s->change.nr; i++) { struct wt_status_change_data *d; struct string_list_item *it; it = &(s->change.items[i]); d = it->util; if (d->stagemask) wt_shortstatus_unmerged(it, s); else wt_shortstatus_status(it, s); } for (i = 0; i < s->untracked.nr; i++) { struct string_list_item *it; it = &(s->untracked.items[i]); wt_shortstatus_other(it, s, "??"); } for (i = 0; i < s->ignored.nr; i++) { struct string_list_item *it; it = &(s->ignored.items[i]); wt_shortstatus_other(it, s, "!!"); } } void wt_porcelain_print(struct wt_status *s) { s->use_color = 0; s->relative_paths = 0; s->prefix = NULL; wt_shortstatus_print(s); }
gpl-2.0
sudosurootdev/kernel_lge_ls980
drivers/cpufreq/cpufreq_interactive.c
3
34893
/* * drivers/cpufreq/cpufreq_interactive.c * * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Mike Chan (mike@android.com) * */ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/cpufreq.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/kernel_stat.h> #include <asm/cputime.h> #define CREATE_TRACE_POINTS #include <trace/events/cpufreq_interactive.h> static int active_count; struct cpufreq_interactive_cpuinfo { struct timer_list cpu_timer; struct timer_list cpu_slack_timer; spinlock_t load_lock; /* protects the next 4 fields */ u64 time_in_idle; u64 time_in_idle_timestamp; u64 cputime_speedadj; u64 cputime_speedadj_timestamp; struct cpufreq_policy *policy; struct cpufreq_frequency_table *freq_table; unsigned int target_freq; unsigned int floor_freq; u64 floor_validate_time; u64 hispeed_validate_time; struct rw_semaphore enable_sem; int governor_enabled; int prev_load; }; static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); /* realtime thread handles frequency scaling */ static struct task_struct *speedchange_task; static cpumask_t speedchange_cpumask; static spinlock_t speedchange_cpumask_lock; static struct mutex gov_lock; /* Hi speed to bump to from lo speed when load burst (default max) */ static unsigned int hispeed_freq; /* Go to hi speed when CPU load at or above this 
value. */ #define DEFAULT_GO_HISPEED_LOAD 99 static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; /* Sampling down factor to be applied to min_sample_time at max freq */ static unsigned int sampling_down_factor; /* Target load. Lower values result in higher CPU speeds. */ #define DEFAULT_TARGET_LOAD 90 static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; static spinlock_t target_loads_lock; static unsigned int *target_loads = default_target_loads; static int ntarget_loads = ARRAY_SIZE(default_target_loads); /* * The minimum amount of time to spend at a frequency before we can ramp down. */ #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC) static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME; /* * The sample rate of the timer used to increase frequency */ #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC) static unsigned long timer_rate = DEFAULT_TIMER_RATE; /* Busy SDF parameters*/ #define MIN_BUSY_TIME (100 * USEC_PER_MSEC) /* * Wait this long before raising speed above hispeed, by default a single * timer interval. */ #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE static unsigned int default_above_hispeed_delay[] = { DEFAULT_ABOVE_HISPEED_DELAY }; static spinlock_t above_hispeed_delay_lock; static unsigned int *above_hispeed_delay = default_above_hispeed_delay; static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); /* Non-zero means indefinite speed boost active */ static int boost_val; /* Duration of a boot pulse in usecs */ static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; /* End time of boost pulse in ktime converted to usecs */ static u64 boostpulse_endtime; /* * Max additional time to wait in idle, beyond timer_rate, at speeds above * minimum before wakeup to reduce speed, or -1 if unnecessary. 
*/ #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE) static int timer_slack_val = DEFAULT_TIMER_SLACK; static bool io_is_busy; /* * If the max load among the other CPUs is higher than sync_freq_load_threshold * then do not let the frequency to drop below sync_freq */ static unsigned int sync_freq_load_threshold; static unsigned int sync_freq; static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event); #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE static #endif struct cpufreq_governor cpufreq_gov_interactive = { .name = "interactive", .governor = cpufreq_governor_interactive, .max_transition_latency = 10000000, .owner = THIS_MODULE, }; static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, cputime64_t *wall) { u64 idle_time; u64 cur_wall_time; u64 busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; idle_time = cur_wall_time - busy_time; if (wall) *wall = jiffies_to_usecs(cur_wall_time); return jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) { u64 idle_time = get_cpu_idle_time_us(cpu, wall); if (idle_time == -1ULL) idle_time = get_cpu_idle_time_jiffy(cpu, wall); else if (!io_is_busy) idle_time += get_cpu_iowait_time_us(cpu, wall); return idle_time; } static void cpufreq_interactive_timer_resched( struct cpufreq_interactive_cpuinfo *pcpu) { unsigned long expires; unsigned long flags; spin_lock_irqsave(&pcpu->load_lock, flags); pcpu->time_in_idle = get_cpu_idle_time(smp_processor_id(), &pcpu->time_in_idle_timestamp); pcpu->cputime_speedadj = 0; pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; expires = 
jiffies + usecs_to_jiffies(timer_rate); mod_timer_pinned(&pcpu->cpu_timer, expires); if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { expires += usecs_to_jiffies(timer_slack_val); mod_timer_pinned(&pcpu->cpu_slack_timer, expires); } spin_unlock_irqrestore(&pcpu->load_lock, flags); } /* The caller shall take enable_sem write semaphore to avoid any timer race. * The cpu_timer and cpu_slack_timer must be deactivated when calling this * function. */ static void cpufreq_interactive_timer_start(int cpu) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); unsigned long flags; pcpu->cpu_timer.expires = expires; add_timer_on(&pcpu->cpu_timer, cpu); if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { expires += usecs_to_jiffies(timer_slack_val); pcpu->cpu_slack_timer.expires = expires; add_timer_on(&pcpu->cpu_slack_timer, cpu); } spin_lock_irqsave(&pcpu->load_lock, flags); pcpu->time_in_idle = get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp); pcpu->cputime_speedadj = 0; pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; spin_unlock_irqrestore(&pcpu->load_lock, flags); } static unsigned int freq_to_above_hispeed_delay(unsigned int freq) { int i; unsigned int ret; unsigned long flags; spin_lock_irqsave(&above_hispeed_delay_lock, flags); for (i = 0; i < nabove_hispeed_delay - 1 && freq >= above_hispeed_delay[i+1]; i += 2) ; ret = above_hispeed_delay[i]; ret = (ret > (1 * USEC_PER_MSEC)) ? 
(ret - (1 * USEC_PER_MSEC)) : ret; spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); return ret; } static unsigned int freq_to_targetload(unsigned int freq) { int i; unsigned int ret; unsigned long flags; spin_lock_irqsave(&target_loads_lock, flags); for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) ; ret = target_loads[i]; spin_unlock_irqrestore(&target_loads_lock, flags); return ret; } /* * If increasing frequencies never map to a lower target load then * choose_freq() will find the minimum frequency that does not exceed its * target load given the current load. */ static unsigned int choose_freq( struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) { unsigned int freq = pcpu->policy->cur; unsigned int prevfreq, freqmin, freqmax; unsigned int tl; int index; freqmin = 0; freqmax = UINT_MAX; do { prevfreq = freq; tl = freq_to_targetload(freq); /* * Find the lowest frequency where the computed load is less * than or equal to the target load. */ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, loadadjfreq / tl, CPUFREQ_RELATION_L, &index)) break; freq = pcpu->freq_table[index].frequency; if (freq > prevfreq) { /* The previous frequency is too low. */ freqmin = prevfreq; if (freq >= freqmax) { /* * Find the highest frequency that is less * than freqmax. */ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, freqmax - 1, CPUFREQ_RELATION_H, &index)) break; freq = pcpu->freq_table[index].frequency; if (freq == freqmin) { /* * The first frequency below freqmax * has already been found to be too * low. freqmax is the lowest speed * we found that is fast enough. */ freq = freqmax; break; } } } else if (freq < prevfreq) { /* The previous frequency is high enough. */ freqmax = prevfreq; if (freq <= freqmin) { /* * Find the lowest frequency that is higher * than freqmin. 
*/ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, freqmin + 1, CPUFREQ_RELATION_L, &index)) break; freq = pcpu->freq_table[index].frequency; /* * If freqmax is the first frequency above * freqmin then we have already found that * this speed is fast enough. */ if (freq == freqmax) break; } } /* If same frequency chosen as previous then done. */ } while (freq != prevfreq); return freq; } static u64 update_load(int cpu) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); u64 now; u64 now_idle; unsigned int delta_idle; unsigned int delta_time; u64 active_time; now_idle = get_cpu_idle_time(cpu, &now); delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle); delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp); if (delta_time <= delta_idle) active_time = 0; else active_time = delta_time - delta_idle; pcpu->cputime_speedadj += active_time * pcpu->policy->cur; pcpu->time_in_idle = now_idle; pcpu->time_in_idle_timestamp = now; return now; } static void cpufreq_interactive_timer(unsigned long data) { u64 now; unsigned int delta_time; u64 cputime_speedadj; int cpu_load; struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, data); unsigned int new_freq; unsigned int loadadjfreq; unsigned int index; unsigned long flags; bool boosted; unsigned long mod_min_sample_time; int i, max_load_other_cpu; unsigned int max_freq_other_cpu; if (!down_read_trylock(&pcpu->enable_sem)) return; if (!pcpu->governor_enabled) goto exit; spin_lock_irqsave(&pcpu->load_lock, flags); now = update_load(data); delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); cputime_speedadj = pcpu->cputime_speedadj; spin_unlock_irqrestore(&pcpu->load_lock, flags); if (WARN_ON_ONCE(!delta_time)) goto rearm; do_div(cputime_speedadj, delta_time); loadadjfreq = (unsigned int)cputime_speedadj * 100; cpu_load = loadadjfreq / pcpu->target_freq; pcpu->prev_load = cpu_load; boosted = boost_val || now < boostpulse_endtime; max_load_other_cpu = 0; 
max_freq_other_cpu = 0; for_each_online_cpu(i) { struct cpufreq_interactive_cpuinfo *picpu = &per_cpu(cpuinfo, i); if (i == data) continue; if (max_load_other_cpu < picpu->prev_load) max_load_other_cpu = picpu->prev_load; if (picpu->policy->cur > max_freq_other_cpu) max_freq_other_cpu = picpu->policy->cur; } if (cpu_load >= go_hispeed_load || boosted) { if (pcpu->target_freq < hispeed_freq) { new_freq = hispeed_freq; } else { new_freq = choose_freq(pcpu, loadadjfreq); if (new_freq < hispeed_freq) new_freq = hispeed_freq; } } else { new_freq = choose_freq(pcpu, loadadjfreq); if (sync_freq && (max_freq_other_cpu > sync_freq) && (max_load_other_cpu > sync_freq_load_threshold) && (new_freq < sync_freq)) new_freq = sync_freq; } if (pcpu->target_freq >= hispeed_freq && new_freq > pcpu->target_freq && now - pcpu->hispeed_validate_time < freq_to_above_hispeed_delay(pcpu->target_freq)) { trace_cpufreq_interactive_notyet( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); goto rearm; } pcpu->hispeed_validate_time = now; if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, new_freq, CPUFREQ_RELATION_L, &index)) goto rearm; new_freq = pcpu->freq_table[index].frequency; /* * Do not scale below floor_freq unless we have been at or above the * floor frequency for the minimum sample time since last validated. */ if (pcpu->policy->cur == pcpu->policy->max) { mod_min_sample_time = sampling_down_factor; } else { mod_min_sample_time = min_sample_time; } if (new_freq < pcpu->floor_freq) { if (now - pcpu->floor_validate_time < mod_min_sample_time) { trace_cpufreq_interactive_notyet( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); goto rearm; } } /* * Update the timestamp for checking whether speed has been held at * or above the selected frequency for a minimum of min_sample_time, * if not boosted to hispeed_freq. 
If boosted to hispeed_freq then we * allow the speed to drop as soon as the boostpulse duration expires * (or the indefinite boost is turned off). */ if (!boosted || new_freq > hispeed_freq) { pcpu->floor_freq = new_freq; pcpu->floor_validate_time = now; } if (pcpu->target_freq == new_freq) { trace_cpufreq_interactive_already( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); goto rearm_if_notmax; } trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); pcpu->target_freq = new_freq; spin_lock_irqsave(&speedchange_cpumask_lock, flags); cpumask_set_cpu(data, &speedchange_cpumask); spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); wake_up_process(speedchange_task); rearm_if_notmax: /* * Already set max speed and don't see a need to change that, * wait until next idle to re-evaluate, don't need timer. */ if (pcpu->target_freq == pcpu->policy->max) goto exit; rearm: if (!timer_pending(&pcpu->cpu_timer)) cpufreq_interactive_timer_resched(pcpu); exit: up_read(&pcpu->enable_sem); return; } static void cpufreq_interactive_idle_start(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); int pending; u64 now; if (!down_read_trylock(&pcpu->enable_sem)) return; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); return; } pending = timer_pending(&pcpu->cpu_timer); if (pcpu->target_freq != pcpu->policy->min) { /* * Entering idle while not at lowest speed. On some * platforms this can hold the other CPU(s) at that speed * even though the CPU is idle. Set a timer to re-evaluate * speed so this idle CPU doesn't hold the other CPUs above * min indefinitely. This should probably be a quirk of * the CPUFreq driver. 
*/ if (!pending) { cpufreq_interactive_timer_resched(pcpu); now = ktime_to_us(ktime_get()); if ((pcpu->policy->cur == pcpu->policy->max) && (now - pcpu->hispeed_validate_time) > MIN_BUSY_TIME) { pcpu->floor_validate_time = now; } } } up_read(&pcpu->enable_sem); } static void cpufreq_interactive_idle_end(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); if (!down_read_trylock(&pcpu->enable_sem)) return; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); return; } /* Arm the timer for 1-2 ticks later if not already. */ if (!timer_pending(&pcpu->cpu_timer)) { cpufreq_interactive_timer_resched(pcpu); } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { del_timer(&pcpu->cpu_timer); del_timer(&pcpu->cpu_slack_timer); cpufreq_interactive_timer(smp_processor_id()); } up_read(&pcpu->enable_sem); } static int cpufreq_interactive_speedchange_task(void *data) { unsigned int cpu; cpumask_t tmp_mask; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; while (1) { set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&speedchange_cpumask_lock, flags); if (cpumask_empty(&speedchange_cpumask)) { spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); schedule(); if (kthread_should_stop()) break; spin_lock_irqsave(&speedchange_cpumask_lock, flags); } set_current_state(TASK_RUNNING); tmp_mask = speedchange_cpumask; cpumask_clear(&speedchange_cpumask); spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { unsigned int j; unsigned int max_freq = 0; pcpu = &per_cpu(cpuinfo, cpu); if (!down_read_trylock(&pcpu->enable_sem)) continue; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); continue; } for_each_cpu(j, pcpu->policy->cpus) { struct cpufreq_interactive_cpuinfo *pjcpu = &per_cpu(cpuinfo, j); if (pjcpu->target_freq > max_freq) max_freq = pjcpu->target_freq; } if (max_freq != pcpu->policy->cur) __cpufreq_driver_target(pcpu->policy, max_freq, CPUFREQ_RELATION_H); 
trace_cpufreq_interactive_setspeed(cpu, pcpu->target_freq, pcpu->policy->cur); up_read(&pcpu->enable_sem); } } return 0; } static void cpufreq_interactive_boost(void) { int i; int anyboost = 0; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; spin_lock_irqsave(&speedchange_cpumask_lock, flags); for_each_online_cpu(i) { pcpu = &per_cpu(cpuinfo, i); if (pcpu->target_freq < hispeed_freq) { pcpu->target_freq = hispeed_freq; cpumask_set_cpu(i, &speedchange_cpumask); pcpu->hispeed_validate_time = ktime_to_us(ktime_get()); anyboost = 1; } /* * Set floor freq and (re)start timer for when last * validated. */ pcpu->floor_freq = hispeed_freq; pcpu->floor_validate_time = ktime_to_us(ktime_get()); } spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); if (anyboost) wake_up_process(speedchange_task); } static int cpufreq_interactive_notifier( struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct cpufreq_interactive_cpuinfo *pcpu; int cpu; unsigned long flags; if (val == CPUFREQ_POSTCHANGE) { pcpu = &per_cpu(cpuinfo, freq->cpu); if (!down_read_trylock(&pcpu->enable_sem)) return 0; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); return 0; } for_each_cpu(cpu, pcpu->policy->cpus) { struct cpufreq_interactive_cpuinfo *pjcpu = &per_cpu(cpuinfo, cpu); if (cpu != freq->cpu) { if (!down_read_trylock(&pjcpu->enable_sem)) continue; if (!pjcpu->governor_enabled) { up_read(&pjcpu->enable_sem); continue; } } spin_lock_irqsave(&pjcpu->load_lock, flags); update_load(cpu); spin_unlock_irqrestore(&pjcpu->load_lock, flags); if (cpu != freq->cpu) up_read(&pjcpu->enable_sem); } up_read(&pcpu->enable_sem); } return 0; } static struct notifier_block cpufreq_notifier_block = { .notifier_call = cpufreq_interactive_notifier, }; static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) { const char *cp; int i; int ntokens = 1; unsigned int *tokenized_data; int err = -EINVAL; cp = buf; while ((cp = strpbrk(cp + 
1, " :"))) ntokens++; if (!(ntokens & 0x1)) goto err; tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); if (!tokenized_data) { err = -ENOMEM; goto err; } cp = buf; i = 0; while (i < ntokens) { if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) goto err_kfree; cp = strpbrk(cp, " :"); if (!cp) break; cp++; } if (i != ntokens) goto err_kfree; *num_tokens = ntokens; return tokenized_data; err_kfree: kfree(tokenized_data); err: return ERR_PTR(err); } static ssize_t show_target_loads( struct kobject *kobj, struct attribute *attr, char *buf) { int i; ssize_t ret = 0; unsigned long flags; spin_lock_irqsave(&target_loads_lock, flags); for (i = 0; i < ntarget_loads; i++) ret += sprintf(buf + ret, "%u%s", target_loads[i], i & 0x1 ? ":" : " "); sprintf(buf + ret - 1, "\n"); spin_unlock_irqrestore(&target_loads_lock, flags); return ret; } static ssize_t store_target_loads( struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ntokens; unsigned int *new_target_loads = NULL; unsigned long flags; new_target_loads = get_tokenized_data(buf, &ntokens); if (IS_ERR(new_target_loads)) return PTR_RET(new_target_loads); spin_lock_irqsave(&target_loads_lock, flags); if (target_loads != default_target_loads) kfree(target_loads); target_loads = new_target_loads; ntarget_loads = ntokens; spin_unlock_irqrestore(&target_loads_lock, flags); return count; } static struct global_attr target_loads_attr = __ATTR(target_loads, S_IRUGO | S_IWUSR, show_target_loads, store_target_loads); static ssize_t show_above_hispeed_delay( struct kobject *kobj, struct attribute *attr, char *buf) { int i; ssize_t ret = 0; unsigned long flags; spin_lock_irqsave(&above_hispeed_delay_lock, flags); for (i = 0; i < nabove_hispeed_delay; i++) ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i], i & 0x1 ? 
":" : " "); sprintf(buf + ret - 1, "\n"); spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); return ret; } static ssize_t store_above_hispeed_delay( struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ntokens; unsigned int *new_above_hispeed_delay = NULL; unsigned long flags; new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); if (IS_ERR(new_above_hispeed_delay)) return PTR_RET(new_above_hispeed_delay); spin_lock_irqsave(&above_hispeed_delay_lock, flags); if (above_hispeed_delay != default_above_hispeed_delay) kfree(above_hispeed_delay); above_hispeed_delay = new_above_hispeed_delay; nabove_hispeed_delay = ntokens; spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); return count; } static struct global_attr above_hispeed_delay_attr = __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR, show_above_hispeed_delay, store_above_hispeed_delay); static ssize_t show_hispeed_freq(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", hispeed_freq); } static ssize_t store_hispeed_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; long unsigned int val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; hispeed_freq = val; return count; } static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, show_hispeed_freq, store_hispeed_freq); static ssize_t show_sampling_down_factor(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", sampling_down_factor); } static ssize_t store_sampling_down_factor(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; long unsigned int val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; sampling_down_factor = val; return count; } static struct global_attr sampling_down_factor_attr = __ATTR(sampling_down_factor, 0644, show_sampling_down_factor, store_sampling_down_factor); static ssize_t show_go_hispeed_load(struct kobject 
*kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", go_hispeed_load); } static ssize_t store_go_hispeed_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; go_hispeed_load = val; return count; } static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, show_go_hispeed_load, store_go_hispeed_load); static ssize_t show_min_sample_time(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", min_sample_time); } static ssize_t store_min_sample_time(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; min_sample_time = val; return count; } static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, show_min_sample_time, store_min_sample_time); static ssize_t show_timer_rate(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%lu\n", timer_rate); } static ssize_t store_timer_rate(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = strict_strtoul(buf, 0, &val); if (ret < 0) return ret; timer_rate = val; return count; } static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, show_timer_rate, store_timer_rate); static ssize_t show_timer_slack( struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%d\n", timer_slack_val); } static ssize_t store_timer_slack( struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtol(buf, 10, &val); if (ret < 0) return ret; timer_slack_val = val; return count; } define_one_global_rw(timer_slack); static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%d\n", boost_val); } static ssize_t 
store_boost(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; boost_val = val; if (boost_val) { trace_cpufreq_interactive_boost("on"); cpufreq_interactive_boost(); } else { trace_cpufreq_interactive_unboost("off"); } return count; } define_one_global_rw(boost); static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val; trace_cpufreq_interactive_boost("pulse"); cpufreq_interactive_boost(); return count; } static struct global_attr boostpulse = __ATTR(boostpulse, 0200, NULL, store_boostpulse); static ssize_t show_boostpulse_duration( struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%d\n", boostpulse_duration_val); } static ssize_t store_boostpulse_duration( struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; boostpulse_duration_val = val; return count; } define_one_global_rw(boostpulse_duration); static ssize_t show_io_is_busy(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", io_is_busy); } static ssize_t store_io_is_busy(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; io_is_busy = val; return count; } static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644, show_io_is_busy, store_io_is_busy); static ssize_t show_sync_freq(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", sync_freq); } static ssize_t store_sync_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long 
val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; sync_freq = val; return count; } static struct global_attr sync_freq_attr = __ATTR(sync_freq, 0644, show_sync_freq, store_sync_freq); static ssize_t show_sync_freq_load_threshold(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", sync_freq_load_threshold); } static ssize_t store_sync_freq_load_threshold(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; sync_freq_load_threshold = val; return count; } static struct global_attr sync_freq_load_threshold_attr = __ATTR(sync_freq_load_threshold, 0644, show_sync_freq_load_threshold, store_sync_freq_load_threshold); static struct attribute *interactive_attributes[] = { &target_loads_attr.attr, &above_hispeed_delay_attr.attr, &hispeed_freq_attr.attr, &go_hispeed_load_attr.attr, &min_sample_time_attr.attr, &timer_rate_attr.attr, &timer_slack.attr, &boost.attr, &boostpulse.attr, &boostpulse_duration.attr, &io_is_busy_attr.attr, &sampling_down_factor_attr.attr, &sync_freq_attr.attr, &sync_freq_load_threshold_attr.attr, NULL, }; static struct attribute_group interactive_attr_group = { .attrs = interactive_attributes, .name = "interactive", }; static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, unsigned long val, void *data) { switch (val) { case IDLE_START: cpufreq_interactive_idle_start(); break; case IDLE_END: cpufreq_interactive_idle_end(); break; } return 0; } static struct notifier_block cpufreq_interactive_idle_nb = { .notifier_call = cpufreq_interactive_idle_notifier, }; static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event) { int rc; unsigned int j; struct cpufreq_interactive_cpuinfo *pcpu; struct cpufreq_frequency_table *freq_table; switch (event) { case CPUFREQ_GOV_START: if (!cpu_online(policy->cpu)) return -EINVAL; mutex_lock(&gov_lock); freq_table = 
cpufreq_frequency_get_table(policy->cpu); if (!hispeed_freq) hispeed_freq = policy->max; for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); pcpu->policy = policy; pcpu->target_freq = policy->cur; pcpu->freq_table = freq_table; pcpu->floor_freq = pcpu->target_freq; pcpu->floor_validate_time = ktime_to_us(ktime_get()); pcpu->hispeed_validate_time = pcpu->floor_validate_time; down_write(&pcpu->enable_sem); cpufreq_interactive_timer_start(j); pcpu->governor_enabled = 1; up_write(&pcpu->enable_sem); } /* * Do not register the idle hook and create sysfs * entries if we have already done so. */ if (++active_count > 1) { mutex_unlock(&gov_lock); return 0; } rc = sysfs_create_group(cpufreq_global_kobject, &interactive_attr_group); if (rc) { mutex_unlock(&gov_lock); return rc; } idle_notifier_register(&cpufreq_interactive_idle_nb); cpufreq_register_notifier( &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); mutex_unlock(&gov_lock); break; case CPUFREQ_GOV_STOP: mutex_lock(&gov_lock); for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); down_write(&pcpu->enable_sem); pcpu->governor_enabled = 0; del_timer_sync(&pcpu->cpu_timer); del_timer_sync(&pcpu->cpu_slack_timer); up_write(&pcpu->enable_sem); } if (--active_count > 0) { mutex_unlock(&gov_lock); return 0; } cpufreq_unregister_notifier( &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); idle_notifier_unregister(&cpufreq_interactive_idle_nb); sysfs_remove_group(cpufreq_global_kobject, &interactive_attr_group); mutex_unlock(&gov_lock); break; case CPUFREQ_GOV_LIMITS: if (policy->max < policy->cur) __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > policy->cur) __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); /* hold write semaphore to avoid race */ down_write(&pcpu->enable_sem); if (pcpu->governor_enabled == 0) { up_write(&pcpu->enable_sem); continue; } /* update target_freq firstly */ 
if (policy->max < pcpu->target_freq) pcpu->target_freq = policy->max; else if (policy->min > pcpu->target_freq) pcpu->target_freq = policy->min; /* Reschedule timer. * Delete the timers, else the timer callback may * return without re-arm the timer when failed * acquire the semaphore. This race may cause timer * stopped unexpectedly. */ del_timer_sync(&pcpu->cpu_timer); del_timer_sync(&pcpu->cpu_slack_timer); cpufreq_interactive_timer_start(j); up_write(&pcpu->enable_sem); } break; } return 0; } static void cpufreq_interactive_nop_timer(unsigned long data) { } static int __init cpufreq_interactive_init(void) { unsigned int i; struct cpufreq_interactive_cpuinfo *pcpu; struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; /* Initalize per-cpu timers */ for_each_possible_cpu(i) { pcpu = &per_cpu(cpuinfo, i); init_timer_deferrable(&pcpu->cpu_timer); pcpu->cpu_timer.function = cpufreq_interactive_timer; pcpu->cpu_timer.data = i; init_timer(&pcpu->cpu_slack_timer); pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; spin_lock_init(&pcpu->load_lock); init_rwsem(&pcpu->enable_sem); } spin_lock_init(&target_loads_lock); spin_lock_init(&speedchange_cpumask_lock); spin_lock_init(&above_hispeed_delay_lock); mutex_init(&gov_lock); speedchange_task = kthread_create(cpufreq_interactive_speedchange_task, NULL, "cfinteractive"); if (IS_ERR(speedchange_task)) return PTR_ERR(speedchange_task); sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param); get_task_struct(speedchange_task); /* NB: wake up so the thread does not look hung to the freezer */ wake_up_process(speedchange_task); return cpufreq_register_governor(&cpufreq_gov_interactive); } #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE fs_initcall(cpufreq_interactive_init); #else module_init(cpufreq_interactive_init); #endif static void __exit cpufreq_interactive_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_interactive); kthread_stop(speedchange_task); put_task_struct(speedchange_task); } 
module_exit(cpufreq_interactive_exit); MODULE_AUTHOR("Mike Chan <mike@android.com>"); MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " "Latency sensitive workloads"); MODULE_LICENSE("GPL");
gpl-2.0
gunine/htc-rider-ics-kernel
block/cfq-iosched.c
3
110293
/* * CFQ, or complete fairness queueing, disk scheduler. * * Based on ideas from a previously unfinished io * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. * * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/jiffies.h> #include <linux/rbtree.h> #include <linux/ioprio.h> #include <linux/blktrace_api.h> #include "cfq.h" /* * tunables */ /* max queue in one round of service */ static const int cfq_quantum = 8; static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; /* maximum backwards seek, in KiB */ static const int cfq_back_max = 16 * 1024; /* penalty of a backwards seek */ static const int cfq_back_penalty = 2; static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; static int cfq_slice_idle = HZ / 125; static int cfq_group_idle = HZ / 125; static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ static const int cfq_hist_divisor = 4; /* * offset from end of service tree */ #define CFQ_IDLE_DELAY (HZ / 5) /* * below this threshold, we consider thinktime immediate */ #define CFQ_MIN_TT (2) #define CFQ_SLICE_SCALE (5) #define CFQ_HW_QUEUE_MIN (5) #define CFQ_SERVICE_SHIFT 12 #define CFQQ_SEEK_THR (sector_t)(8 * 100) #define CFQQ_CLOSE_THR (sector_t)(8 * 1024) #define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) #define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) #define RQ_CIC(rq) \ ((struct cfq_io_context *) (rq)->elevator_private[0]) #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private[1]) #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private[2]) static struct kmem_cache *cfq_pool; static struct kmem_cache *cfq_ioc_pool; static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); static struct completion *ioc_gone; static DEFINE_SPINLOCK(ioc_gone_lock); static DEFINE_SPINLOCK(cic_index_lock); static 
DEFINE_IDA(cic_index_ida); #define CFQ_PRIO_LISTS IOPRIO_BE_NR #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) #define sample_valid(samples) ((samples) > 80) #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) /* * Most of our rbtree usage is for sorting with min extraction, so * if we cache the leftmost node we don't have to walk down the tree * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should * move this into the elevator for the rq sorting as well. */ struct cfq_rb_root { struct rb_root rb; struct rb_node *left; unsigned count; unsigned total_weight; u64 min_vdisktime; }; #define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \ .count = 0, .min_vdisktime = 0, } /* * Per process-grouping structure */ struct cfq_queue { /* reference count */ int ref; /* various state flags, see below */ unsigned int flags; /* parent cfq_data */ struct cfq_data *cfqd; /* service_tree member */ struct rb_node rb_node; /* service_tree key */ unsigned long rb_key; /* prio tree member */ struct rb_node p_node; /* prio tree root we belong to, if any */ struct rb_root *p_root; /* sorted list of pending requests */ struct rb_root sort_list; /* if fifo isn't expired, next request to serve */ struct request *next_rq; /* requests queued in sort_list */ int queued[2]; /* currently allocated requests */ int allocated[2]; /* fifo list of requests in sort_list */ struct list_head fifo; /* time when queue got scheduled in to dispatch first request. */ unsigned long dispatch_start; unsigned int allocated_slice; unsigned int slice_dispatch; /* time when first request from queue completed and slice started. 
*/ unsigned long slice_start; unsigned long slice_end; long slice_resid; /* pending metadata requests */ int meta_pending; /* number of requests that are on the dispatch list or inside driver */ int dispatched; /* io prio of this group */ unsigned short ioprio, org_ioprio; unsigned short ioprio_class, org_ioprio_class; pid_t pid; u32 seek_history; sector_t last_request_pos; struct cfq_rb_root *service_tree; struct cfq_queue *new_cfqq; struct cfq_group *cfqg; /* Number of sectors dispatched from queue in single dispatch round */ unsigned long nr_sectors; }; /* * First index in the service_trees. * IDLE is handled separately, so it has negative index */ enum wl_prio_t { BE_WORKLOAD = 0, RT_WORKLOAD = 1, IDLE_WORKLOAD = 2, CFQ_PRIO_NR, }; /* * Second index in the service_trees. */ enum wl_type_t { ASYNC_WORKLOAD = 0, SYNC_NOIDLE_WORKLOAD = 1, SYNC_WORKLOAD = 2 }; /* This is per cgroup per device grouping structure */ struct cfq_group { /* group service_tree member */ struct rb_node rb_node; /* group service_tree key */ u64 vdisktime; unsigned int weight; unsigned int new_weight; bool needs_update; /* number of cfqq currently on this group */ int nr_cfqq; /* * Per group busy queues average. Useful for workload slice calc. We * create the array for each prio class but at run time it is used * only for RT and BE class and slot for IDLE class remains unused. * This is primarily done to avoid confusion and a gcc warning. */ unsigned int busy_queues_avg[CFQ_PRIO_NR]; /* * rr lists of queues with requests. We maintain service trees for * RT and BE classes. These trees are subdivided in subclasses * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE * class there is no subclassification and all the cfq queues go on * a single tree service_tree_idle. 
* Counts are embedded in the cfq_rb_root */ struct cfq_rb_root service_trees[2][3]; struct cfq_rb_root service_tree_idle; unsigned long saved_workload_slice; enum wl_type_t saved_workload; enum wl_prio_t saved_serving_prio; struct blkio_group blkg; #ifdef CONFIG_CFQ_GROUP_IOSCHED struct hlist_node cfqd_node; int ref; #endif /* number of requests that are on the dispatch list or inside driver */ int dispatched; }; /* * Per block device queue structure */ struct cfq_data { struct request_queue *queue; /* Root service tree for cfq_groups */ struct cfq_rb_root grp_service_tree; struct cfq_group root_group; /* * The priority currently being served */ enum wl_prio_t serving_prio; enum wl_type_t serving_type; unsigned long workload_expires; struct cfq_group *serving_group; /* * Each priority tree is sorted by next_request position. These * trees are used when determining if two or more queues are * interleaving requests (see cfq_close_cooperator). */ struct rb_root prio_trees[CFQ_PRIO_LISTS]; unsigned int busy_queues; unsigned int busy_sync_queues; int rq_in_driver; int rq_in_flight[2]; /* * queue-depth detection */ int rq_queued; int hw_tag; /* * hw_tag can be * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) * 0 => no NCQ */ int hw_tag_est_depth; unsigned int hw_tag_samples; /* * idle window management */ struct timer_list idle_slice_timer; struct work_struct unplug_work; struct cfq_queue *active_queue; struct cfq_io_context *active_cic; /* * async queue for each priority case */ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; struct cfq_queue *async_idle_cfqq; sector_t last_position; /* * tunables, see top of file */ unsigned int cfq_quantum; unsigned int cfq_fifo_expire[2]; unsigned int cfq_back_penalty; unsigned int cfq_back_max; unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; unsigned int cfq_group_idle; unsigned int 
cfq_latency; unsigned int cic_index; struct list_head cic_list; /* * Fallback dummy cfqq for extreme OOM conditions */ struct cfq_queue oom_cfqq; unsigned long last_delayed_sync; /* List of cfq groups being managed on this device*/ struct hlist_head cfqg_list; /* Number of groups which are on blkcg->blkg_list */ unsigned int nr_blkcg_linked_grps; }; static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg, enum wl_prio_t prio, enum wl_type_t type) { if (!cfqg) return NULL; if (prio == IDLE_WORKLOAD) return &cfqg->service_tree_idle; return &cfqg->service_trees[prio][type]; } enum cfqq_state_flags { CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ CFQ_CFQQ_FLAG_sync, /* synchronous queue */ CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ }; #define CFQ_CFQQ_FNS(name) \ static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ { \ (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ { \ (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ { \ return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ } CFQ_CFQQ_FNS(on_rr); CFQ_CFQQ_FNS(wait_request); CFQ_CFQQ_FNS(must_dispatch); CFQ_CFQQ_FNS(must_alloc_slice); CFQ_CFQQ_FNS(fifo_expire); CFQ_CFQQ_FNS(idle_window); 
CFQ_CFQQ_FNS(prio_changed); CFQ_CFQQ_FNS(slice_new); CFQ_CFQQ_FNS(sync); CFQ_CFQQ_FNS(coop); CFQ_CFQQ_FNS(split_coop); CFQ_CFQQ_FNS(deep); CFQ_CFQQ_FNS(wait_busy); #undef CFQ_CFQQ_FNS #ifdef CONFIG_CFQ_GROUP_IOSCHED #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ blkg_path(&(cfqq)->cfqg->blkg), ##args) #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ blkg_path(&(cfqg)->blkg), ##args) \ #else #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0) #endif #define cfq_log(cfqd, fmt, args...) \ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) /* Traverses through cfq group service trees */ #define for_each_cfqg_st(cfqg, i, j, st) \ for (i = 0; i <= IDLE_WORKLOAD; i++) \ for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\ : &cfqg->service_tree_idle; \ (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \ (i == IDLE_WORKLOAD && j == 0); \ j++, st = i < IDLE_WORKLOAD ? \ &cfqg->service_trees[i][j]: NULL) \ static inline bool iops_mode(struct cfq_data *cfqd) { /* * If we are not idling on queues and it is a NCQ drive, parallel * execution of requests is on and measuring time is not possible * in most of the cases until and unless we drive shallower queue * depths and that becomes a performance bottleneck. In such cases * switch to start providing fairness in terms of number of IOs. 
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

/* Map a queue's I/O class to its workload priority bucket. */
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

/* Classify a queue as async, sync-noidle or sync workload type. */
static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

/* Number of busy queues of the given workload priority in this group. */
static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

/* Total busy async queues (RT + BE) in this group. */
static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);

/* Per-cic cached queues: index 0 = async cfqq, index 1 = sync cfqq. */
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

#define CIC_DEAD_KEY	1ul
#define CIC_DEAD_INDEX_SHIFT	1

/* Encode cic_index into a "dead" key that marks the cfqd as gone. */
static inline void *cfqd_dead_key(struct cfq_data *cfqd)
{
	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
}

/* Return the cfqd behind a cic, or NULL if the key is a dead marker. */
static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
		return NULL;

	return cfqd;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/* Scale a used slice by group weight to produce a vdisktime charge. */
static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * BLKIO_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}

/* Monotonic max: the signed delta copes with u64 vdisktime wraparound. */
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

/* Advance st->min_vdisktime to the leftmost (soonest) group's vdisktime. */
static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follows sudden increases and decrease slowly
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}

/* Share of the target latency this group earns, proportional to weight. */
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}

/*
 * Compute the time slice for cfqq, possibly shrunk so that all queues of
 * the same class in the group fit within the group's latency budget.
 */
static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

/* Start a new time slice for cfqq, sized by cfq_scaled_cfqq_slice(). */
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	/* sync beats async, META beats non-META, before distance matters */
	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
		return rq1->cmd_flags & REQ_META ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

/* Erase n from the service tree, invalidating the leftmost cache. */
static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		/* wrap to the start of the sort list, skipping 'last' */
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/* Sort key for a group on the service tree: vdisktime relative to min. */
static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	/* keep the leftmost cache current if we inserted at the far left */
	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

/* Apply a pending weight change; only legal while the group is off-tree. */
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->needs_update) {
		cfqg->weight = cfqg->new_weight;
		cfqg->needs_update = false;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * if group does not loose all if it was not continuously backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;

	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}

/*
 * Jiffies of slice actually consumed by cfqq.  Any overrun past the
 * allocated slice is reported through *unaccounted_time instead.
 */
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are mutiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

/* Charge cfqq's consumed service to its group and reposition the group. */
static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	/* in iops mode charge by dispatch count, not elapsed time */
	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired.
	   Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
					st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
					  unaccounted_sl);
	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct cfq_group, blkg);
	return NULL;
}

/*
 * blkio cgroup callback: record the new weight; it is applied by
 * cfq_update_group_weight() the next time the group is off-tree.
 */
void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
					unsigned int weight)
{
	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
	cfqg->new_weight = weight;
	cfqg->needs_update = true;
}

static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
			struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
{
	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * Add group onto cgroup list. It might happen that bdi->dev is
	 * not initialized yet. Initialize this new group without major
	 * and minor info and this info will be filled in once a new thread
	 * comes for IO.
	 */
	if (bdi->dev) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
					(void *)cfqd, MKDEV(major, minor));
	} else
		cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
					(void *)cfqd, 0);

	cfqd->nr_blkcg_linked_grps++;
	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);

	/* Add group on cfqd list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
}

/*
 * Should be called from sleepable context. No request queue lock as per
 * cpu stats are allocated dynamically and alloc_percpu needs to be called
 * from sleepable context.
 */
static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = NULL;
	int i, j, ret;
	struct cfq_rb_root *st;

	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/*
	 * Take the initial reference that will be released on destroy
	 * This can be thought of a joint reference by cgroup and
	 * elevator which will be dropped by either elevator exit
	 * or cgroup deletion path depending on who is exiting first.
	 */
	cfqg->ref = 1;

	ret = blkio_alloc_blkg_stats(&cfqg->blkg);
	if (ret) {
		kfree(cfqg);
		return NULL;
	}

	return cfqg;
}

static struct cfq_group *
cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
{
	struct cfq_group *cfqg = NULL;
	void *key = cfqd;
	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		cfqg = &cfqd->root_group;
	else
		cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* fill in the device number once the bdi device is known */
	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfqg->blkg.dev = MKDEV(major, minor);
	}

	return cfqg;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	struct blkio_cgroup *blkcg;
	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
	struct request_queue *q = cfqd->queue;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	cfqg = cfq_find_cfqg(cfqd, blkcg);
	if (cfqg) {
		rcu_read_unlock();
		return cfqg;
	}

	/*
	 * Need to allocate a group. Allocation of group also needs allocation
	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
	 * we need to drop rcu lock and queue_lock before we call alloc.
	 *
	 * Not taking any queue reference here and assuming that queue is
	 * around by the time we return.
CFQ queue allocation code does * the same. It might be racy though. */ rcu_read_unlock(); spin_unlock_irq(q->queue_lock); cfqg = cfq_alloc_cfqg(cfqd); spin_lock_irq(q->queue_lock); rcu_read_lock(); blkcg = task_blkio_cgroup(current); /* * If some other thread already allocated the group while we were * not holding queue lock, free up the group */ __cfqg = cfq_find_cfqg(cfqd, blkcg); if (__cfqg) { kfree(cfqg); rcu_read_unlock(); return __cfqg; } if (!cfqg) cfqg = &cfqd->root_group; cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg); rcu_read_unlock(); return cfqg; } static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) { cfqg->ref++; return cfqg; } static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { /* Currently, all async queues are mapped to root group */ if (!cfq_cfqq_sync(cfqq)) cfqg = &cfqq->cfqd->root_group; cfqq->cfqg = cfqg; /* cfqq reference on cfqg */ cfqq->cfqg->ref++; } static void cfq_put_cfqg(struct cfq_group *cfqg) { struct cfq_rb_root *st; int i, j; BUG_ON(cfqg->ref <= 0); cfqg->ref--; if (cfqg->ref) return; for_each_cfqg_st(cfqg, i, j, st) BUG_ON(!RB_EMPTY_ROOT(&st->rb)); free_percpu(cfqg->blkg.stats_cpu); kfree(cfqg); } static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) { /* Something wrong if we are trying to remove same group twice */ BUG_ON(hlist_unhashed(&cfqg->cfqd_node)); hlist_del_init(&cfqg->cfqd_node); /* * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. */ cfq_put_cfqg(cfqg); } static void cfq_release_cfq_groups(struct cfq_data *cfqd) { struct hlist_node *pos, *n; struct cfq_group *cfqg; hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) { /* * If cgroup removal path got to blk_group first and removed * it from cgroup list, then it will take care of destroying * cfqg also. 
		 */
		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
			cfq_destroy_cfqg(cfqd, cfqg);
	}
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
 * read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if elevator was exiting, cgroup deltion
 * path got to it first.
 */
void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long  flags;
	struct cfq_data *cfqd = key;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

#else /* GROUP_IOSCHED */
/* Without group scheduling everything maps onto the single root group. */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
	return &cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	BUG_ON(service_tree == NULL);
	if (cfq_class_idle(cfqq)) {
		/* idle class always sorts after the current last entry */
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		/* add_front: key just below the current leftmost entry */
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

/*
 * Look up a queue in the per-priority sector-sorted tree.  Returns the
 * exact match, or NULL while reporting the insertion point via
 * *ret_parent / *rb_link.
 */
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias, *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

/* Re-sort rq in its queue's sort list, fixing up the blkio statistics. */
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
}

/* Find a queued request that a front merge with bio would produce. */
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	/* remember where the head will be after this request */
	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	if (rq->cmd_flags & REQ_META) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		/* start sector changed: re-sort in the queue's rb tree */
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					bio_data_dir(bio), cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					rq_data_dir(next), rq_is_sync(next));
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

/* Stop the idle timer and account the idle time against cfqq's group. */
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		/* reset all per-slice accounting for the new slice */
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
*/ if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) cfq_mark_cfqq_split_coop(cfqq); /* * store what was left of this slice, if the queue idled/timed out */ if (timed_out) { if (cfq_cfqq_slice_new(cfqq)) cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); else cfqq->slice_resid = cfqq->slice_end - jiffies; cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); } cfq_group_served(cfqd, cfqq->cfqg, cfqq); if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) cfq_del_cfqq_rr(cfqd, cfqq); cfq_resort_rr_list(cfqd, cfqq); if (cfqq == cfqd->active_queue) cfqd->active_queue = NULL; if (cfqd->active_cic) { put_io_context(cfqd->active_cic->ioc); cfqd->active_cic = NULL; } } static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) { struct cfq_queue *cfqq = cfqd->active_queue; if (cfqq) __cfq_slice_expired(cfqd, cfqq, timed_out); } /* * Get next queue for service. Unless we have a queue preemption, * we'll simply select the first cfqq in the service tree. */ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) { struct cfq_rb_root *service_tree = service_tree_for(cfqd->serving_group, cfqd->serving_prio, cfqd->serving_type); if (!cfqd->rq_queued) return NULL; /* There is nothing to dispatch */ if (!service_tree) return NULL; if (RB_EMPTY_ROOT(&service_tree->rb)) return NULL; return cfq_rb_first(service_tree); } static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) { struct cfq_group *cfqg; struct cfq_queue *cfqq; int i, j; struct cfq_rb_root *st; if (!cfqd->rq_queued) return NULL; cfqg = cfq_get_next_cfqg(cfqd); if (!cfqg) return NULL; for_each_cfqg_st(cfqg, i, j, st) if ((cfqq = cfq_rb_first(st)) != NULL) return cfqq; return NULL; } /* * Get and set a new active queue for service. 
*/ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) { if (!cfqq) cfqq = cfq_get_next_queue(cfqd); __cfq_set_active_queue(cfqd, cfqq); return cfqq; } static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, struct request *rq) { if (blk_rq_pos(rq) >= cfqd->last_position) return blk_rq_pos(rq) - cfqd->last_position; else return cfqd->last_position - blk_rq_pos(rq); } static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct request *rq) { return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; } static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, struct cfq_queue *cur_cfqq) { struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; struct rb_node *parent, *node; struct cfq_queue *__cfqq; sector_t sector = cfqd->last_position; if (RB_EMPTY_ROOT(root)) return NULL; /* * First, if we find a request starting at the end of the last * request, choose it. */ __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); if (__cfqq) return __cfqq; /* * If the exact sector wasn't found, the parent of the NULL leaf * will contain the closest sector. */ __cfqq = rb_entry(parent, struct cfq_queue, p_node); if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; if (blk_rq_pos(__cfqq->next_rq) < sector) node = rb_next(&__cfqq->p_node); else node = rb_prev(&__cfqq->p_node); if (!node) return NULL; __cfqq = rb_entry(node, struct cfq_queue, p_node); if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) return __cfqq; return NULL; } /* * cfqd - obvious * cur_cfqq - passed in so that we don't decide that the current queue is * closely cooperating with itself. * * So, basically we're assuming that that cur_cfqq has dispatched at least * one request, and that cfqd->last_position reflects a position on the disk * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid * assumption. 
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	/* never merge with or into idle-class / async / seeky queues */
	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}

/*
 * Arm the idle-slice timer: idle the device waiting for the active queue
 * to issue another request, or fall back to group idling if per-queue
 * idling is disabled for this queue.
 */
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
					rq_data_dir(rq), rq_is_sync(rq));
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	/* only one fifo expiry check per slice */
	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

/* Max requests an async queue may dispatch per slice, scaled by ioprio. */
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

/*
 * Schedule a merge of @cfqq into @new_cfqq (or the reverse, whichever
 * direction moves the lesser amount of process references).
 */
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}

/*
 * Pick the workload type (SYNC, SYNC_NOIDLE, ASYNC) whose first queue has
 * the lowest rb_key within @prio of @cfqg.
 */
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		struct cfq_rb_root *service_tree;

		service_tree = service_tree_for(cfqg, prio, i);
		BUG_ON(service_tree == NULL);
		queue = cfq_rb_first(service_tree);
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

/*
 * Select the serving priority class and workload type for @cfqg, and
 * compute how long the chosen workload may run (cfqd->workload_expires).
 */
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	BUG_ON(st == NULL);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues with-in same group will lead to higher
		 * async ratio system wide as generally root group is going
		 * to have higher weight. A more accurate thing would be to
		 * calculate system wide async/sync ratio.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}

/*
 * Pick the first group from the group service tree and update the tree's
 * minimum virtual disk time.
 */
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

/*
 * Select the next serving group, restoring its previously saved workload
 * state before (re)choosing a service tree for it.
 */
static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	BUG_ON(cfqg == NULL);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice, that means we
		 * have been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

/* Drain every queued request of @cfqq into the dispatch list. */
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

/* Estimate whether @cfqq's time slice will run out before it drains. */
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}

/*
 * Decide whether @cfqq may dispatch another request right now, applying
 * the sync/async draining rules and the per-queue depth limits.
 */
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
*/ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) { struct request *rq; BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); if (!cfq_may_dispatch(cfqd, cfqq)) return false; /* * follow expired path, else get first next available */ rq = cfq_check_fifo(cfqq); if (!rq) rq = cfqq->next_rq; /* * insert request into driver dispatch list */ cfq_dispatch_insert(cfqd->queue, rq); if (!cfqd->active_cic) { struct cfq_io_context *cic = RQ_CIC(rq); atomic_long_inc(&cic->ioc->refcount); cfqd->active_cic = cic; } return true; } /* * Find the cfqq that we need to service and move a request from that to the * dispatch list */ static int cfq_dispatch_requests(struct request_queue *q, int force) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq; if (!cfqd->busy_queues) return 0; if (unlikely(force)) return cfq_forced_dispatch(cfqd); cfqq = cfq_select_queue(cfqd); if (!cfqq) return 0; /* * Dispatch a request from this cfqq, if it is allowed */ if (!cfq_dispatch_request(cfqd, cfqq)) return 0; cfqq->slice_dispatch++; cfq_clear_cfqq_must_dispatch(cfqq); /* * expire an async queue immediately if it has used up its slice. idle * queue always expire after 1 dispatch round. */ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || cfq_class_idle(cfqq))) { cfqq->slice_end = jiffies + 1; cfq_slice_expired(cfqd, 0); } cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); return 1; } /* * task holds one reference to the queue, dropped when task exits. each rq * in-flight on this queue also holds a reference, dropped when rq is freed. * * Each cfq queue took a reference on the parent group. Drop it now. * queue lock must be held here. 
*/ static void cfq_put_queue(struct cfq_queue *cfqq) { struct cfq_data *cfqd = cfqq->cfqd; struct cfq_group *cfqg; BUG_ON(cfqq->ref <= 0); cfqq->ref--; if (cfqq->ref) return; cfq_log_cfqq(cfqd, cfqq, "put_queue"); BUG_ON(rb_first(&cfqq->sort_list)); BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); cfqg = cfqq->cfqg; if (unlikely(cfqd->active_queue == cfqq)) { __cfq_slice_expired(cfqd, cfqq, 0); cfq_schedule_dispatch(cfqd); } BUG_ON(cfq_cfqq_on_rr(cfqq)); kmem_cache_free(cfq_pool, cfqq); cfq_put_cfqg(cfqg); } /* * Call func for each cic attached to this ioc. */ static void call_for_each_cic(struct io_context *ioc, void (*func)(struct io_context *, struct cfq_io_context *)) { struct cfq_io_context *cic; struct hlist_node *n; rcu_read_lock(); hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) func(ioc, cic); rcu_read_unlock(); } static void cfq_cic_free_rcu(struct rcu_head *head) { struct cfq_io_context *cic; cic = container_of(head, struct cfq_io_context, rcu_head); kmem_cache_free(cfq_ioc_pool, cic); elv_ioc_count_dec(cfq_ioc_count); if (ioc_gone) { /* * CFQ scheduler is exiting, grab exit lock and check * the pending io context count. If it hits zero, * complete ioc_gone and set it back to NULL */ spin_lock(&ioc_gone_lock); if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) { complete(ioc_gone); ioc_gone = NULL; } spin_unlock(&ioc_gone_lock); } } static void cfq_cic_free(struct cfq_io_context *cic) { call_rcu(&cic->rcu_head, cfq_cic_free_rcu); } static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) { unsigned long flags; unsigned long dead_key = (unsigned long) cic->key; BUG_ON(!(dead_key & CIC_DEAD_KEY)); spin_lock_irqsave(&ioc->lock, flags); radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT); hlist_del_rcu(&cic->cic_list); spin_unlock_irqrestore(&ioc->lock, flags); cfq_cic_free(cic); } /* * Must be called with rcu_read_lock() held or preemption otherwise disabled. 
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc.  So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	call_for_each_cic(ioc, cic_free_func);
}

/* Drop the references this cfqq holds along its ->new_cfqq merge chain. */
static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

/* Tear down @cfqq: expire it if active, then drop cooperator and own refs. */
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

/* Detach @cic from @cfqd: mark the key dead and release both its cfqqs. */
static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure dead mark is seen for dead queues
	 */
	smp_wmb();
	cic->key = cfqd_dead_key(cfqd);

	/* drop the ioc's last-cic cache if it points at us */
	rcu_read_lock();
	if (rcu_dereference(ioc->ioc_data) == cic) {
		rcu_read_unlock();
		spin_lock(&ioc->lock);
		rcu_assign_pointer(ioc->ioc_data, NULL);
		spin_unlock(&ioc->lock);
	} else
		rcu_read_unlock();

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

/* Per-cic exit hook: take the queue lock, re-check the key, then detach. */
static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);

		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
		smp_read_barrier_depends();
		if (cic->key == cfqd)
			__cfq_exit_single_io_context(cfqd, cic);

		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belongs to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

/* Allocate and minimally initialize a cfq_io_context on cfqd's NUMA node. */
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(cfq_ioc_count);
	}

	return cic;
}

/*
 * Map the ioc's io priority/class onto @cfqq.
 * NOTE(review): this function is truncated at the end of this chunk — the
 * final assignment's right-hand side lies outside the visible source.
 */
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		/* fall through: treat an unknown class like CLASS_NONE */
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class =
cfqq->ioprio_class; cfq_clear_cfqq_prio_changed(cfqq); } static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) { struct cfq_data *cfqd = cic_to_cfqd(cic); struct cfq_queue *cfqq; unsigned long flags; if (unlikely(!cfqd)) return; spin_lock_irqsave(cfqd->queue->queue_lock, flags); cfqq = cic->cfqq[BLK_RW_ASYNC]; if (cfqq) { struct cfq_queue *new_cfqq; new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, GFP_ATOMIC); if (new_cfqq) { cic->cfqq[BLK_RW_ASYNC] = new_cfqq; cfq_put_queue(cfqq); } } cfqq = cic->cfqq[BLK_RW_SYNC]; if (cfqq) cfq_mark_cfqq_prio_changed(cfqq); spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } static void cfq_ioc_set_ioprio(struct io_context *ioc) { call_for_each_cic(ioc, changed_ioprio); } static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, pid_t pid, bool is_sync) { RB_CLEAR_NODE(&cfqq->rb_node); RB_CLEAR_NODE(&cfqq->p_node); INIT_LIST_HEAD(&cfqq->fifo); cfqq->ref = 0; cfqq->cfqd = cfqd; cfq_mark_cfqq_prio_changed(cfqq); if (is_sync) { if (!cfq_class_idle(cfqq)) cfq_mark_cfqq_idle_window(cfqq); cfq_mark_cfqq_sync(cfqq); } cfqq->pid = pid; } #ifdef CONFIG_CFQ_GROUP_IOSCHED static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic) { struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); struct cfq_data *cfqd = cic_to_cfqd(cic); unsigned long flags; struct request_queue *q; if (unlikely(!cfqd)) return; q = cfqd->queue; spin_lock_irqsave(q->queue_lock, flags); if (sync_cfqq) { /* * Drop reference to sync queue. A new sync queue will be * assigned in new group upon arrival of a fresh request. 
*/ cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); cic_set_cfqq(cic, NULL, 1); cfq_put_queue(sync_cfqq); } spin_unlock_irqrestore(q->queue_lock, flags); } static void cfq_ioc_set_cgroup(struct io_context *ioc) { call_for_each_cic(ioc, changed_cgroup); ioc->cgroup_changed = 0; } #endif /* CONFIG_CFQ_GROUP_IOSCHED */ static struct cfq_queue * cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, gfp_t gfp_mask) { struct cfq_queue *cfqq, *new_cfqq = NULL; struct cfq_io_context *cic; struct cfq_group *cfqg; retry: cfqg = cfq_get_cfqg(cfqd); cic = cfq_cic_lookup(cfqd, ioc); /* cic always exists here */ cfqq = cic_to_cfqq(cic, is_sync); /* * Always try a new alloc if we fell back to the OOM cfqq * originally, since it should just be a temporary situation. */ if (!cfqq || cfqq == &cfqd->oom_cfqq) { cfqq = NULL; if (new_cfqq) { cfqq = new_cfqq; new_cfqq = NULL; } else if (gfp_mask & __GFP_WAIT) { spin_unlock_irq(cfqd->queue->queue_lock); new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO, cfqd->queue->node); spin_lock_irq(cfqd->queue->queue_lock); if (new_cfqq) goto retry; } else { cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO, cfqd->queue->node); } if (cfqq) { cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); cfq_init_prio_data(cfqq, ioc); cfq_link_cfqq_cfqg(cfqq, cfqg); cfq_log_cfqq(cfqd, cfqq, "alloced"); } else cfqq = &cfqd->oom_cfqq; } if (new_cfqq) kmem_cache_free(cfq_pool, new_cfqq); return cfqq; } static struct cfq_queue ** cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) { switch (ioprio_class) { case IOPRIO_CLASS_RT: return &cfqd->async_cfqq[0][ioprio]; case IOPRIO_CLASS_BE: return &cfqd->async_cfqq[1][ioprio]; case IOPRIO_CLASS_IDLE: return &cfqd->async_idle_cfqq; default: BUG(); } } static struct cfq_queue * cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, gfp_t gfp_mask) { const int ioprio = task_ioprio(ioc); const int ioprio_class = 
task_ioprio_class(ioc); struct cfq_queue **async_cfqq = NULL; struct cfq_queue *cfqq = NULL; if (!is_sync) { async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); cfqq = *async_cfqq; } if (!cfqq) cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); /* * pin the queue now that it's allocated, scheduler exit will prune it */ if (!is_sync && !(*async_cfqq)) { cfqq->ref++; *async_cfqq = cfqq; } cfqq->ref++; return cfqq; } /* * We drop cfq io contexts lazily, so we may find a dead one. */ static void cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, struct cfq_io_context *cic) { unsigned long flags; WARN_ON(!list_empty(&cic->queue_list)); BUG_ON(cic->key != cfqd_dead_key(cfqd)); spin_lock_irqsave(&ioc->lock, flags); BUG_ON(rcu_dereference_check(ioc->ioc_data, lockdep_is_held(&ioc->lock)) == cic); radix_tree_delete(&ioc->radix_root, cfqd->cic_index); hlist_del_rcu(&cic->cic_list); spin_unlock_irqrestore(&ioc->lock, flags); cfq_cic_free(cic); } static struct cfq_io_context * cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) { struct cfq_io_context *cic; unsigned long flags; if (unlikely(!ioc)) return NULL; rcu_read_lock(); /* * we maintain a last-hit cache, to avoid browsing over the tree */ cic = rcu_dereference(ioc->ioc_data); if (cic && cic->key == cfqd) { rcu_read_unlock(); return cic; } do { cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index); rcu_read_unlock(); if (!cic) break; if (unlikely(cic->key != cfqd)) { cfq_drop_dead_cic(cfqd, ioc, cic); rcu_read_lock(); continue; } spin_lock_irqsave(&ioc->lock, flags); rcu_assign_pointer(ioc->ioc_data, cic); spin_unlock_irqrestore(&ioc->lock, flags); break; } while (1); return cic; } /* * Add cic into ioc, using cfqd as the search key. This enables us to lookup * the process specific cfq io context when entered from the block layer. * Also adds the cic to a per-cfqd list, used when this queue is removed. 
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	/* preload so the radix insert below cannot fail on allocation */
	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
						cfqd->cic_index, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	/* -EEXIST is handled by the caller (someone else linked first) */
	if (ret && ret != -EEXIST)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;
	int ret;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
	if (ret == -EEXIST) {
		/* someone has linked cic to ioc already */
		cfq_cic_free(cic);
		goto retry;
	} else if (ret)
		goto err_free;

out:
	/* propagate pending ioprio / cgroup changes to our queues */
	if (unlikely(test_and_clear_bit(IOC_CFQ_IOPRIO_CHANGED,
	    ioc->ioprio_changed)))
		cfq_ioc_set_ioprio(ioc);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (unlikely(ioc->cgroup_changed))
		cfq_ioc_set_cgroup(ioc);
#endif
	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

/*
 * Update the decaying think-time statistics for this cic. Fixed point:
 * each step keeps 7/8 of the old value; 256 represents one new sample.
 */
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	/* clamp a long idle gap to twice the idle slice */
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

/*
 * Record whether this request looks "seeky" relative to the previous one
 * in a sliding bit history on the cfqq. On non-rotational devices small
 * requests, rather than distant ones, count as seeky.
 */
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	/* a queue with several requests pending is considered "deep" */
	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		/* idling pays off only if think time fits in the idle slice */
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	/* idle-class queues never preempt anyone */
	if (cfq_class_idle(new_cfqq))
		return false;

	/* ... but anyone may preempt an idle-class queue */
	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	/* queues in different cgroups don't preempt each other here */
	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *old_cfqq = cfqd->active_queue;

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_META)
		cfqq->meta_pending++;

	/* refresh the heuristics this request contributes to */
	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired it's mean thinktime or this new queue
		 * has some old slice time left and is of higher priority or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

/*
 * Elevator add_req hook: stamp the fifo expiry, queue the request on the
 * cfqq's fifo and sort tree, account it, then run the enqueue heuristics.
 */
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
			&cfqd->serving_group->blkg, rq_data_dir(rq),
			rq_is_sync(rq));
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	/* once detected as queueing hardware, stay that way */
	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If active queue hasn't enough requests and can idle, cfq might not
	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
	 * case
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

/*
 * Decide whether to keep the (empty) active queue alive a little longer
 * in the hope that its process issues another request soon.
 */
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
		return true;

	/*
	 * If think times is less than a jiffy than ttime_mean=0 and above
	 * will not be true. It might happen that slice has not expired yet
	 * but will expire soon (4-5 ns) during select_queue(). To cover the
	 * case where think time is less than a jiffy, mark the queue wait
	 * busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

/*
 * Elevator completed_req hook: drop in-flight accounting, update stats and
 * heuristics, and decide whether the active queue should expire, wait busy,
 * or arm the idle timer.
 */
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
			rq_data_dir(rq), rq_is_sync(rq));

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		RQ_CIC(rq)->last_end_request = now;
		/* track when sync completions stopped being delayed */
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for next request to come in before we expire
		 * the queue.
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * unboost the queue (if needed)
		 */
		cfqq->ioprio_class = cfqq->org_ioprio_class;
		cfqq->ioprio = cfqq->org_ioprio;
	}
}

/*
 * A queue that is waiting for a request and hasn't been granted a slice
 * allocation yet MUST be allowed to queue, otherwise it may stall.
 */
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

/*
 * Elevator may_queue hook.
 */
static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private[0] = NULL;
		rq->elevator_private[1] = NULL;

		/* Put down rq reference on cfqg */
		cfq_put_cfqg(RQ_CFQG(rq));
		rq->elevator_private[2] = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Swap this cic's sync queue for the queue it was scheduled to merge with.
 * The reference on new_cfqq was taken in cfq_setup_merge.
 */
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		/* sole owner: keep the queue, just clear the coop state */
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue.  The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	/* attach cic, cfqq and a cfqg reference to the request */
	cfqq->ref++;
	rq->elevator_private[0] = cic;
	rq->elevator_private[1] = cfqq;
	rq->elevator_private[2] = cfq_ref_get_cfqg(cfqq->cfqg);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 0;

queue_fail:
	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}

/*
 * Deferred-work handler: run the request queue with the queue lock held.
 */
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Synchronously stop the idle timer and any pending unplug work.
 */
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

/*
 * Drop the references pinned on the cached shared async queues
 * (taken in cfq_get_queue).
 */
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

/*
 * Elevator exit hook: tear down all cics, async queues and groups, release
 * the cic index, and free cfqd.
 */
static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;
	bool wait = false;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);
	cfq_release_cfq_groups(cfqd);

	/*
	 * If there are groups which we could not unlink from blkcg list,
	 * wait for a rcu period for them to be freed.
	 */
	if (cfqd->nr_blkcg_linked_grps)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/* second shutdown in case teardown above rearmed timer/work */
	cfq_shutdown_timer_wq(cfqd);

	spin_lock(&cic_index_lock);
	ida_remove(&cic_index_ida, cfqd->cic_index);
	spin_unlock(&cic_index_lock);

	/*
	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other unlinked groups out
	 * there. This can happen if cgroup deletion path claimed the
	 * responsibility of cleaning up a group before queue cleanup code
	 * get to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* Free up per cpu stats for root group */
	free_percpu(cfqd->root_group.blkg.stats_cpu);
#endif
	kfree(cfqd);
}

/*
 * Allocate a unique index for this cfqd from the global ida, used as the
 * per-ioc radix tree key. Returns the index or a negative errno.
 */
static int cfq_alloc_cic_index(void)
{
	int index, error;

	do {
		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock(&cic_index_lock);
		error = ida_get_new(&cic_index_ida, &index);
		spin_unlock(&cic_index_lock);
		if (error && error != -EAGAIN)
			return error;
	} while (error);

	return index;
}

/*
 * Elevator init hook: allocate and initialise the per-queue cfq_data,
 * including the root group, the embedded OOM fallback queue, the idle
 * timer/work, and the tunables. Returns NULL on failure.
 */
static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	int i, j;
	struct cfq_group *cfqg;
	struct cfq_rb_root *st;

	i = cfq_alloc_cic_index();
	if (i < 0)
		return NULL;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd) {
		spin_lock(&cic_index_lock);
		ida_remove(&cic_index_ida, i);
		spin_unlock(&cic_index_lock);
		return NULL;
	}

	/*
	 * Don't need take queue_lock in the routine, since we are
	 * initializing the ioscheduler, and nobody is using cfqd
	 */
	cfqd->cic_index = i;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group */
	cfqg = &cfqd->root_group;
	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	/* Give preference to root group over other groups */
	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on cfqd->cfqg_list are being deleted during queue exit.
	 * Other reference will remain there as we don't want to delete this
	 * group as it is statically allocated and gets destroyed when
	 * throtl_data goes away.
	 */
	cfqg->ref = 2;

	if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
		kfree(cfqg);

		spin_lock(&cic_index_lock);
		ida_remove(&cic_index_ida, cfqd->cic_index);
		spin_unlock(&cic_index_lock);

		kfree(cfqd);
		return NULL;
	}

	rcu_read_lock();

	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
					(void *)cfqd, 0);
	rcu_read_unlock();
	cfqd->nr_blkcg_linked_grps++;

	/* Add group on cfqd->cfqg_list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
#endif
	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);

	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	/* copy the module-level tunable defaults into this instance */
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;	/* -1: hw queueing not yet determined */
	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return cfqd;
}

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

/* Create the cfqq and cic slab caches. */
static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

/*
 * Generate a sysfs show handler; __CONV converts jiffies to msecs
 * for time-valued tunables.
 */
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

/*
 * Generate a sysfs store handler that clamps input to [MIN, MAX];
 * __CONV converts msecs input to jiffies for time-valued tunables.
 */
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
};
#else
/* empty policy so registration below is harmless without group scheduling */
static struct blkio_policy_type blkio_policy_cfq;
#endif

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);
	blkio_policy_register(&blkio_policy_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	blkio_policy_unregister(&blkio_policy_cfq);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(cfq_ioc_count))
		wait_for_completion(&all_gone);
	ida_destroy(&cic_index_ida);
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
gpl-2.0
ivanovp/Marlin
Marlin/src/lcd/extui/lib/mks_ui/draw_encoder_settings.cpp
3
6008
/** * Marlin 3D Printer Firmware * Copyright (c) 2020 MarlinFirmware [https://github.com/MarlinFirmware/Marlin] * * Based on Sprinter and grbl. * Copyright (c) 2011 Camiel Gubbels / Erik van der Zalm * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include "../../../../inc/MarlinConfigPre.h" #if HAS_TFT_LVGL_UI #include "lv_conf.h" #include "draw_ui.h" #include "../../../../MarlinCore.h" #include "../../../../module/planner.h" #include "../../../../module/stepper/indirection.h" #include "../../../../feature/tmc_util.h" #include "../../../../gcode/gcode.h" #include "../../../../module/planner.h" #if BUTTONS_EXIST(EN1, EN2) extern lv_group_t * g; static lv_obj_t * scr; static lv_obj_t * buttonEncoderState = NULL; static lv_obj_t *labelEncoderState = NULL; #define ID_ENCODER_RETURN 1 #define ID_ENCODER_STATE 2 static void event_handler(lv_obj_t * obj, lv_event_t event) { switch (obj->mks_obj_id) { case ID_ENCODER_RETURN: if (event == LV_EVENT_CLICKED) { } else if (event == LV_EVENT_RELEASED) { lv_clear_encoder_settings(); draw_return_ui(); } break; case ID_ENCODER_STATE: if (event == LV_EVENT_CLICKED) { } else if (event == LV_EVENT_RELEASED) { if (gCfgItems.encoder_enable) { gCfgItems.encoder_enable = false; lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_REL, "F:/bmp_disable.bin"); lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_PR, "F:/bmp_disable.bin"); 
lv_label_set_text(labelEncoderState, machine_menu.disable); update_spi_flash(); } else { gCfgItems.encoder_enable = true; lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_REL, "F:/bmp_enable.bin"); lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_PR, "F:/bmp_enable.bin"); lv_label_set_text(labelEncoderState, machine_menu.enable); update_spi_flash(); } } break; } } void lv_draw_encoder_settings(void) { lv_obj_t *buttonBack = NULL, *label_Back = NULL; lv_obj_t *labelEncoderTips = NULL; lv_obj_t * line1 = NULL; if (disp_state_stack._disp_state[disp_state_stack._disp_index] != ENCODER_SETTINGS_UI) { disp_state_stack._disp_index++; disp_state_stack._disp_state[disp_state_stack._disp_index] = ENCODER_SETTINGS_UI; } disp_state = ENCODER_SETTINGS_UI; scr = lv_obj_create(NULL, NULL); lv_obj_set_style(scr, &tft_style_scr); lv_scr_load(scr); lv_obj_clean(scr); lv_obj_t * title = lv_label_create(scr, NULL); lv_obj_set_style(title, &tft_style_label_rel); lv_obj_set_pos(title, TITLE_XPOS, TITLE_YPOS); lv_label_set_text(title, machine_menu.EncoderConfTitle); lv_refr_now(lv_refr_get_disp_refreshing()); labelEncoderTips = lv_label_create(scr, NULL); lv_obj_set_style(labelEncoderTips, &tft_style_label_rel); lv_obj_set_pos(labelEncoderTips, PARA_UI_POS_X, PARA_UI_POS_Y + 10); lv_label_set_text(labelEncoderTips, machine_menu.EncoderConfText); buttonEncoderState = lv_imgbtn_create(scr, NULL); lv_obj_set_pos(buttonEncoderState, PARA_UI_STATE_POS_X, PARA_UI_POS_Y + PARA_UI_STATE_V); if (gCfgItems.encoder_enable) { lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_REL, "F:/bmp_enable.bin"); lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_PR, "F:/bmp_enable.bin"); } else { lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_REL, "F:/bmp_disable.bin"); lv_imgbtn_set_src(buttonEncoderState, LV_BTN_STATE_PR, "F:/bmp_disable.bin"); } lv_obj_set_event_cb_mks(buttonEncoderState, event_handler, ID_ENCODER_STATE, NULL, 0); lv_imgbtn_set_style(buttonEncoderState, LV_BTN_STATE_PR, 
&tft_style_label_pre); lv_imgbtn_set_style(buttonEncoderState, LV_BTN_STATE_REL, &tft_style_label_rel); lv_btn_set_layout(buttonEncoderState, LV_LAYOUT_OFF); labelEncoderState = lv_label_create(buttonEncoderState, NULL); line1 = lv_line_create(scr, NULL); lv_ex_line(line1, line_points[0]); buttonBack = lv_imgbtn_create(scr, NULL); lv_obj_set_event_cb_mks(buttonBack, event_handler, ID_ENCODER_RETURN, NULL, 0); lv_imgbtn_set_src(buttonBack, LV_BTN_STATE_REL, "F:/bmp_back70x40.bin"); lv_imgbtn_set_src(buttonBack, LV_BTN_STATE_PR, "F:/bmp_back70x40.bin"); lv_imgbtn_set_style(buttonBack, LV_BTN_STATE_PR, &tft_style_label_pre); lv_imgbtn_set_style(buttonBack, LV_BTN_STATE_REL, &tft_style_label_rel); lv_obj_set_pos(buttonBack, PARA_UI_BACL_POS_X, PARA_UI_BACL_POS_Y); lv_btn_set_layout(buttonBack, LV_LAYOUT_OFF); label_Back = lv_label_create(buttonBack, NULL); if (gCfgItems.encoder_enable) { lv_label_set_text(labelEncoderState, machine_menu.enable); lv_obj_align(labelEncoderState, buttonEncoderState, LV_ALIGN_CENTER, 0, 0); } else { lv_label_set_text(labelEncoderState, machine_menu.disable); lv_obj_align(labelEncoderState, buttonEncoderState, LV_ALIGN_CENTER, 0, 0); } lv_label_set_text(label_Back, common_menu.text_back); lv_obj_align(label_Back, buttonBack, LV_ALIGN_CENTER, 0, 0); #if HAS_ROTARY_ENCODER if (gCfgItems.encoder_enable) { lv_group_add_obj(g, buttonEncoderState); lv_group_add_obj(g, buttonBack); } #endif } void lv_clear_encoder_settings() { #if HAS_ROTARY_ENCODER lv_group_remove_all_objs(g); #endif lv_obj_del(scr); } #endif // BUTTONS_EXIST(EN1, EN2) #endif // HAS_TFT_LVGL_UI
gpl-2.0
deafnote/kernel-gigabyte-rior1
arch/arm/mach-msm/board-8930-regulator.c
3
18876
/* * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/regulator/pm8xxx-regulator.h> #include "board-8930.h" #define VREG_CONSUMERS(_id) \ static struct regulator_consumer_supply vreg_consumers_##_id[] /* * Consumer specific regulator names: * regulator name consumer dev_name */ VREG_CONSUMERS(L1) = { REGULATOR_SUPPLY("8038_l1", NULL), REGULATOR_SUPPLY("iris_vddrfa", "wcnss_wlan.0"), }; VREG_CONSUMERS(L2) = { REGULATOR_SUPPLY("8038_l2", NULL), REGULATOR_SUPPLY("iris_vdddig", "wcnss_wlan.0"), REGULATOR_SUPPLY("dsi_vdda", "mipi_dsi.1"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.0"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.1"), REGULATOR_SUPPLY("mipi_csi_vdd", "msm_csid.2"), }; VREG_CONSUMERS(L3) = { REGULATOR_SUPPLY("8038_l3", NULL), REGULATOR_SUPPLY("HSUSB_3p3", "msm_otg"), }; VREG_CONSUMERS(L4) = { REGULATOR_SUPPLY("8038_l4", NULL), REGULATOR_SUPPLY("HSUSB_1p8", "msm_otg"), REGULATOR_SUPPLY("iris_vddxo", "wcnss_wlan.0"), }; VREG_CONSUMERS(L5) = { REGULATOR_SUPPLY("8038_l5", NULL), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.1"), }; VREG_CONSUMERS(L6) = { REGULATOR_SUPPLY("8038_l6", NULL), REGULATOR_SUPPLY("sdc_vdd", "msm_sdcc.3"), }; VREG_CONSUMERS(L7) = { REGULATOR_SUPPLY("8038_l7", NULL), }; VREG_CONSUMERS(L8) = { REGULATOR_SUPPLY("8038_l8", NULL), REGULATOR_SUPPLY("dsi_vdc", "mipi_dsi.1"), }; VREG_CONSUMERS(L9) = { REGULATOR_SUPPLY("8038_l9", NULL), REGULATOR_SUPPLY("vdd_ana", "3-004a"), REGULATOR_SUPPLY("vdd", "3-0024"), REGULATOR_SUPPLY("cam_vana", "4-001a"), REGULATOR_SUPPLY("cam_vana", 
"4-006c"), REGULATOR_SUPPLY("cam_vana", "4-0048"), REGULATOR_SUPPLY("cam_vaf", "4-001a"), REGULATOR_SUPPLY("cam_vaf", "4-006c"), REGULATOR_SUPPLY("cam_vaf", "4-0048"), REGULATOR_SUPPLY("cam_vana", "4-0020"), REGULATOR_SUPPLY("cam_vaf", "4-0020"), }; VREG_CONSUMERS(L10) = { REGULATOR_SUPPLY("8038_l10", NULL), REGULATOR_SUPPLY("iris_vddpa", "wcnss_wlan.0"), }; VREG_CONSUMERS(L11) = { REGULATOR_SUPPLY("8038_l11", NULL), REGULATOR_SUPPLY("vdd_dig", "3-004a"), REGULATOR_SUPPLY("iris_vddio", "wcnss_wlan.0"), REGULATOR_SUPPLY("riva_vddpx", "wcnss_wlan.0"), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.1"), REGULATOR_SUPPLY("VDDIO_CDC", "sitar-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar-slim"), REGULATOR_SUPPLY("VDDIO_CDC", "sitar1p1-slim"), REGULATOR_SUPPLY("CDC_VDDA_TX", "sitar1p1-slim"), REGULATOR_SUPPLY("CDC_VDDA_RX", "sitar1p1-slim"), REGULATOR_SUPPLY("vddp", "0-0048"), }; VREG_CONSUMERS(L12) = { REGULATOR_SUPPLY("8038_l12", NULL), REGULATOR_SUPPLY("cam_vdig", "4-001a"), REGULATOR_SUPPLY("cam_vdig", "4-006c"), REGULATOR_SUPPLY("cam_vdig", "4-0048"), REGULATOR_SUPPLY("cam_vdig", "4-0020"), }; VREG_CONSUMERS(L14) = { REGULATOR_SUPPLY("8038_l14", NULL), REGULATOR_SUPPLY("pa_therm", "pm8xxx-adc"), }; VREG_CONSUMERS(L15) = { REGULATOR_SUPPLY("8038_l15", NULL), }; VREG_CONSUMERS(L16) = { REGULATOR_SUPPLY("8038_l16", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.2"), }; VREG_CONSUMERS(L17) = { REGULATOR_SUPPLY("8038_l17", NULL), }; VREG_CONSUMERS(L18) = { REGULATOR_SUPPLY("8038_l18", NULL), }; VREG_CONSUMERS(L19) = { REGULATOR_SUPPLY("8038_l19", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.1"), }; VREG_CONSUMERS(L20) = { REGULATOR_SUPPLY("8038_l20", NULL), REGULATOR_SUPPLY("VDDD_CDC_D", "sitar-slim"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar-slim"), REGULATOR_SUPPLY("VDDD_CDC_D", "sitar1p1-slim"), REGULATOR_SUPPLY("CDC_VDDA_A_1P2V", "sitar1p1-slim"), }; VREG_CONSUMERS(L21) = { REGULATOR_SUPPLY("8038_l21", NULL), }; 
VREG_CONSUMERS(L22) = { REGULATOR_SUPPLY("8038_l22", NULL), REGULATOR_SUPPLY("sdc_vdd_io", "msm_sdcc.3"), }; VREG_CONSUMERS(L23) = { REGULATOR_SUPPLY("8038_l23", NULL), REGULATOR_SUPPLY("dsi_vddio", "mipi_dsi.1"), REGULATOR_SUPPLY("hdmi_avdd", "hdmi_msm.0"), REGULATOR_SUPPLY("hdmi_vcc", "hdmi_msm.0"), REGULATOR_SUPPLY("pll_vdd", "pil_riva"), REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.1"), REGULATOR_SUPPLY("pll_vdd", "pil_qdsp6v4.2"), }; VREG_CONSUMERS(L24) = { REGULATOR_SUPPLY("8038_l24", NULL), REGULATOR_SUPPLY("riva_vddmx", "wcnss_wlan.0"), }; VREG_CONSUMERS(L26) = { REGULATOR_SUPPLY("8038_l26", NULL), }; VREG_CONSUMERS(L27) = { REGULATOR_SUPPLY("8038_l27", NULL), REGULATOR_SUPPLY("core_vdd", "pil_qdsp6v4.0"), }; VREG_CONSUMERS(S1) = { REGULATOR_SUPPLY("8038_s1", NULL), REGULATOR_SUPPLY("riva_vddcx", "wcnss_wlan.0"), }; VREG_CONSUMERS(S2) = { REGULATOR_SUPPLY("8038_s2", NULL), }; VREG_CONSUMERS(S3) = { REGULATOR_SUPPLY("8038_s3", NULL), }; VREG_CONSUMERS(S4) = { REGULATOR_SUPPLY("8038_s4", NULL), REGULATOR_SUPPLY("CDC_VDD_CP", "sitar-slim"), REGULATOR_SUPPLY("CDC_VDD_CP", "sitar1p1-slim"), }; VREG_CONSUMERS(S5) = { REGULATOR_SUPPLY("8038_s5", NULL), REGULATOR_SUPPLY("krait0", NULL), }; VREG_CONSUMERS(S6) = { REGULATOR_SUPPLY("8038_s6", NULL), REGULATOR_SUPPLY("krait1", NULL), }; VREG_CONSUMERS(LVS1) = { REGULATOR_SUPPLY("8038_lvs1", NULL), REGULATOR_SUPPLY("cam_vio", "4-001a"), REGULATOR_SUPPLY("cam_vio", "4-006c"), REGULATOR_SUPPLY("cam_vio", "4-0048"), REGULATOR_SUPPLY("cam_vio", "4-0020"), }; VREG_CONSUMERS(LVS2) = { REGULATOR_SUPPLY("8038_lvs2", NULL), REGULATOR_SUPPLY("vcc_i2c", "3-004a"), REGULATOR_SUPPLY("vcc_i2c", "3-0024"), REGULATOR_SUPPLY("vcc_i2c", "0-0048"), }; VREG_CONSUMERS(EXT_5V) = { REGULATOR_SUPPLY("ext_5v", NULL), REGULATOR_SUPPLY("hdmi_mvs", "hdmi_msm.0"), }; VREG_CONSUMERS(EXT_OTG_SW) = { REGULATOR_SUPPLY("ext_otg_sw", NULL), REGULATOR_SUPPLY("vbus_otg", "msm_otg"), }; VREG_CONSUMERS(VDD_DIG_CORNER) = { REGULATOR_SUPPLY("vdd_dig_corner", 
NULL), REGULATOR_SUPPLY("hsusb_vdd_dig", "msm_otg"), }; #define PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, _modes, _ops, \ _apply_uV, _pull_down, _always_on, _supply_regulator, \ _system_uA, _enable_time, _reg_id) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ .input_uV = _max_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pull_down_enable = _pull_down, \ .system_uA = _system_uA, \ .enable_time = _enable_time, \ } #define PM8XXX_LDO(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_NLDO1200(_id, _name, _always_on, _pull_down, _min_uV, \ _max_uV, _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_SMPS(_id, _name, _always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE | \ REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_DRMS, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_FTSMPS(_id, _name, 
_always_on, _pull_down, _min_uV, _max_uV, \ _enable_time, _supply_regulator, _system_uA, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, REGULATOR_MODE_NORMAL, \ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS \ | REGULATOR_CHANGE_MODE, 0, _pull_down, _always_on, \ _supply_regulator, _system_uA, _enable_time, _reg_id) #define PM8XXX_VS(_id, _name, _always_on, _pull_down, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, \ _pull_down, _always_on, _supply_regulator, 0, _enable_time, \ _reg_id) #define PM8XXX_VS300(_id, _name, _always_on, _pull_down, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, 0, 0, 0, REGULATOR_CHANGE_STATUS, 0, \ _pull_down, _always_on, _supply_regulator, 0, _enable_time, \ _reg_id) #define PM8XXX_NCP(_id, _name, _always_on, _min_uV, _max_uV, _enable_time, \ _supply_regulator, _reg_id) \ PM8XXX_VREG_INIT(_id, _name, _min_uV, _max_uV, 0, \ REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS, 0, 0, \ _always_on, _supply_regulator, 0, _enable_time, _reg_id) /* Pin control initialization */ #define PM8XXX_PC(_id, _name, _always_on, _pin_fn, _pin_ctrl, \ _supply_regulator, _reg_id) \ { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ .name = _name, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = _reg_id, \ .pin_fn = PM8XXX_VREG_PIN_FN_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } #define RPM_INIT(_id, _min_uV, _max_uV, _modes, _ops, _apply_uV, _default_uV, \ _peak_uA, _avg_uA, _pull_down, _pin_ctrl, _freq, _pin_fn, \ _force_mode, _sleep_set_force_mode, _power_mode, _state, \ _sleep_selectable, _always_on, _supply_regulator, _system_uA) \ { \ .init_data = { \ .constraints = { \ .valid_modes_mask = _modes, \ .valid_ops_mask = _ops, \ .min_uV = _min_uV, \ .max_uV = _max_uV, 
\ .input_uV = _min_uV, \ .apply_uV = _apply_uV, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8038_##_id, \ .default_uV = _default_uV, \ .peak_uA = _peak_uA, \ .avg_uA = _avg_uA, \ .pull_down_enable = _pull_down, \ .pin_ctrl = _pin_ctrl, \ .freq = RPM_VREG_FREQ_##_freq, \ .pin_fn = _pin_fn, \ .force_mode = _force_mode, \ .sleep_set_force_mode = _sleep_set_force_mode, \ .power_mode = _power_mode, \ .state = _state, \ .sleep_selectable = _sleep_selectable, \ .system_uA = _system_uA, \ } #define RPM_LDO(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _init_peak_uA) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _max_uV, _init_peak_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, RPM_VREG_POWER_MODE_8930_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, _system_uA) #define RPM_SMPS(_id, _always_on, _pd, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _system_uA, _freq, _force_mode, \ _sleep_set_force_mode) \ RPM_INIT(_id, _min_uV, _max_uV, REGULATOR_MODE_NORMAL \ | REGULATOR_MODE_IDLE, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_MODE \ | REGULATOR_CHANGE_DRMS, 0, _min_uV, _system_uA, 0, _pd, \ RPM_VREG_PIN_CTRL_NONE, _freq, RPM_VREG_PIN_FN_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_##_force_mode, \ RPM_VREG_FORCE_MODE_8930_##_sleep_set_force_mode, \ RPM_VREG_POWER_MODE_8930_PWM, RPM_VREG_STATE_OFF, \ _sleep_selectable, _always_on, _supply_regulator, _system_uA) #define RPM_VS(_id, _always_on, _pd, _sleep_selectable, _supply_regulator) \ RPM_INIT(_id, 0, 0, 0, 
REGULATOR_CHANGE_STATUS, 0, 0, 1000, 1000, _pd, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, RPM_VREG_POWER_MODE_8930_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) #define RPM_NCP(_id, _always_on, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator, _freq) \ RPM_INIT(_id, _min_uV, _max_uV, 0, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS, 0, _max_uV, 1000, 1000, 0, \ RPM_VREG_PIN_CTRL_NONE, _freq, RPM_VREG_PIN_FN_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, RPM_VREG_POWER_MODE_8930_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) #define RPM_CORNER(_id, _always_on, _sleep_selectable, _min_uV, _max_uV, \ _supply_regulator) \ RPM_INIT(_id, _min_uV, _max_uV, 0, REGULATOR_CHANGE_VOLTAGE \ | REGULATOR_CHANGE_STATUS, 0, _max_uV, 0, 0, 0, \ RPM_VREG_PIN_CTRL_NONE, NONE, RPM_VREG_PIN_FN_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, \ RPM_VREG_FORCE_MODE_8930_NONE, RPM_VREG_POWER_MODE_8930_PWM, \ RPM_VREG_STATE_OFF, _sleep_selectable, _always_on, \ _supply_regulator, 0) /* Pin control initialization */ #define RPM_PC_INIT(_id, _always_on, _pin_fn, _pin_ctrl, _supply_regulator) \ { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ .always_on = _always_on, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id##_PC), \ .consumer_supplies = vreg_consumers_##_id##_PC, \ .supply_regulator = _supply_regulator, \ }, \ .id = RPM_VREG_ID_PM8038_##_id##_PC, \ .pin_fn = RPM_VREG_PIN_FN_8930_##_pin_fn, \ .pin_ctrl = _pin_ctrl, \ } #define GPIO_VREG(_id, _reg_name, _gpio_label, _gpio, _supply_regulator) \ [MSM8930_GPIO_VREG_ID_##_id] = { \ .init_data = { \ .constraints = { \ .valid_ops_mask = REGULATOR_CHANGE_STATUS, \ }, \ .num_consumer_supplies = \ ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ .supply_regulator = 
_supply_regulator, \ }, \ .regulator_name = _reg_name, \ .gpio_label = _gpio_label, \ .gpio = _gpio, \ } #define SAW_VREG_INIT(_id, _name, _min_uV, _max_uV) \ { \ .constraints = { \ .name = _name, \ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, \ .min_uV = _min_uV, \ .max_uV = _max_uV, \ }, \ .num_consumer_supplies = ARRAY_SIZE(vreg_consumers_##_id), \ .consumer_supplies = vreg_consumers_##_id, \ } /* GPIO regulator constraints */ struct gpio_regulator_platform_data msm8930_gpio_regulator_pdata[] __devinitdata = { /* ID vreg_name gpio_label gpio supply */ GPIO_VREG(EXT_5V, "ext_5v", "ext_5v_en", 63, NULL), GPIO_VREG(EXT_OTG_SW, "ext_otg_sw", "ext_otg_sw_en", 97, "ext_5v"), }; /* SAW regulator constraints */ struct regulator_init_data msm8930_saw_regulator_core0_pdata = /* ID vreg_name min_uV max_uV */ SAW_VREG_INIT(S5, "8038_s5", 850000, 1300000); struct regulator_init_data msm8930_saw_regulator_core1_pdata = SAW_VREG_INIT(S6, "8038_s6", 850000, 1300000); /* PM8038 regulator constraints */ struct pm8xxx_regulator_platform_data msm8930_pm8038_regulator_pdata[] __devinitdata = { /* * ID name always_on pd min_uV max_uV en_t supply * system_uA reg_ID */ PM8XXX_NLDO1200(L16, "8038_l16", 0, 1, 375000, 1050000, 200, "8038_s3", 0, 0), PM8XXX_NLDO1200(L19, "8038_l19", 0, 1, 375000, 1050000, 200, "8038_s3", 0, 1), PM8XXX_NLDO1200(L27, "8038_l27", 0, 1, 375000, 1050000, 200, "8038_s3", 0, 2), }; static struct rpm_regulator_init_data msm8930_rpm_regulator_init_data[] __devinitdata = { /* ID a_on pd ss min_uV max_uV supply sys_uA freq fm ss_fm */ RPM_SMPS(S1, 0, 1, 1, 500000, 1150000, NULL, 100000, 4p80, AUTO, LPM), RPM_SMPS(S2, 1, 1, 1, 1400000, 1400000, NULL, 100000, 1p60, AUTO, LPM), RPM_SMPS(S3, 0, 1, 1, 1150000, 1150000, NULL, 100000, 3p20, AUTO, LPM), RPM_SMPS(S4, 1, 1, 1, 1950000, 2200000, NULL, 100000, 1p60, AUTO, LPM), /* ID a_on pd ss min_uV max_uV supply sys_uA init_ip */ RPM_LDO(L1, 0, 1, 0, 1300000, 1300000, "8038_s2", 0, 0), RPM_LDO(L2, 0, 1, 0, 1200000, 1200000, 
"8038_s2", 0, 0), RPM_LDO(L3, 0, 1, 0, 3075000, 3075000, NULL, 0, 0), RPM_LDO(L4, 1, 1, 0, 1800000, 1800000, NULL, 10000, 10000), RPM_LDO(L5, 0, 1, 0, 2950000, 2950000, NULL, 0, 0), RPM_LDO(L6, 0, 1, 0, 2950000, 2950000, NULL, 0, 0), RPM_LDO(L7, 0, 1, 0, 2050000, 2050000, "8038_s4", 0, 0), RPM_LDO(L8, 0, 1, 0, 2800000, 2800000, NULL, 0, 0), RPM_LDO(L9, 0, 1, 0, 2850000, 2850000, NULL, 0, 0), RPM_LDO(L10, 0, 1, 0, 2900000, 2900000, NULL, 0, 0), RPM_LDO(L11, 1, 1, 0, 1800000, 1800000, "8038_s4", 10000, 10000), RPM_LDO(L12, 0, 1, 0, 1200000, 1200000, "8038_s2", 0, 0), RPM_LDO(L14, 0, 1, 0, 1800000, 1800000, NULL, 0, 0), RPM_LDO(L15, 0, 1, 0, 1800000, 2950000, NULL, 0, 0), RPM_LDO(L17, 0, 1, 0, 1800000, 2950000, NULL, 0, 0), RPM_LDO(L18, 0, 1, 0, 1800000, 1800000, NULL, 0, 0), RPM_LDO(L20, 1, 1, 0, 1250000, 1250000, "8038_s2", 10000, 10000), RPM_LDO(L21, 0, 1, 0, 1900000, 1900000, "8038_s4", 0, 0), RPM_LDO(L22, 1, 1, 0, 1850000, 2950000, NULL, 10000, 10000), RPM_LDO(L23, 1, 1, 1, 1800000, 1800000, "8038_s4", 0, 0), RPM_LDO(L24, 0, 1, 1, 500000, 1150000, "8038_s2", 10000, 10000), RPM_LDO(L26, 1, 1, 0, 1050000, 1050000, "8038_s2", 10000, 10000), /* ID a_on pd ss supply */ RPM_VS(LVS1, 0, 1, 0, "8038_l11"), RPM_VS(LVS2, 0, 1, 0, "8038_l11"), /* ID a_on ss min_corner max_corner supply */ RPM_CORNER(VDD_DIG_CORNER, 0, 1, RPM_VREG_CORNER_NONE, RPM_VREG_CORNER_HIGH, NULL), }; int msm8930_pm8038_regulator_pdata_len __devinitdata = ARRAY_SIZE(msm8930_pm8038_regulator_pdata); struct rpm_regulator_platform_data msm8930_rpm_regulator_pdata __devinitdata = { .init_data = msm8930_rpm_regulator_init_data, .num_regulators = ARRAY_SIZE(msm8930_rpm_regulator_init_data), .version = RPM_VREG_VERSION_8930, .vreg_id_vdd_mem = RPM_VREG_ID_PM8038_L24, .vreg_id_vdd_dig = RPM_VREG_ID_PM8038_VDD_DIG_CORNER, };
gpl-2.0
kyoushuu/anjuta
plugins/am-project/am-project.c
3
70963
/* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4; coding: utf-8 -*- */ /* am-project.c * * Copyright (C) 2009 Sébastien Granjoux * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "am-project.h" #include "am-project-private.h" #include "amp-node.h" #include "amp-module.h" #include "amp-package.h" #include "amp-group.h" #include "amp-target.h" #include "amp-source.h" #include "amp-object.h" #include "command-queue.h" #include <libanjuta/interfaces/ianjuta-project.h> #include <libanjuta/anjuta-debug.h> #include <libanjuta/anjuta-utils.h> #include <libanjuta/anjuta-pkg-config.h> #include <string.h> #include <memory.h> #include <errno.h> #include <fcntl.h> #include <unistd.h> #include <ctype.h> #include <sys/types.h> #include <signal.h> #include <glib/gi18n.h> #include <gio/gio.h> #include <glib.h> #include "ac-scanner.h" #include "ac-writer.h" #include "am-scanner.h" #include "am-writer.h" //#include "am-config.h" #include "am-properties.h" #define UNIMPLEMENTED G_STMT_START { g_warning (G_STRLOC": unimplemented"); } G_STMT_END /* Constant strings for parsing perl script error output */ #define ERROR_PREFIX "ERROR(" #define WARNING_PREFIX "WARNING(" #define MESSAGE_DELIMITER ": " const gchar *valid_am_makefiles[] = {"GNUmakefile.am", 
"makefile.am", "Makefile.am", NULL}; #define STR_REPLACE(target, source) \ { g_free (target); target = source == NULL ? NULL : g_strdup (source);} typedef struct _AmpConfigFile AmpConfigFile; struct _AmpConfigFile { GFile *file; AnjutaToken *token; }; /* Node types *---------------------------------------------------------------------------*/ static AmpNodeInfo AmpNodeInformations[] = { {{ANJUTA_PROJECT_GROUP | ANJUTA_PROJECT_ROOT_GROUP, N_("Root"), "text/plain", "autotools-project-root-edit"}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_GROUP, N_("Group"), "text/plain", "autotools-project-folder-edit"}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_SOURCE, N_("Source"), "text/plain", NULL}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_UNKNOWN | ANJUTA_PROJECT_READ_ONLY, /* Translator: Unknown here is a target type, if not unknown it can * be a program or a shared library by example */ N_("Unknown"), "text/plain", "autotools-project-target-edit"}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_PRIMARY | ANJUTA_PROJECT_SHAREDLIB, N_("Shared Library (libtool)"), "application/x-sharedlib", "autotools-project-target-edit"}, AM_TOKEN__LTLIBRARIES, "LTLIBRARIES", "lib"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_PRIMARY | ANJUTA_PROJECT_LT_MODULE, N_("Module (Libtool)"), "application/x-sharedlib", "autotools-project-target-edit"}, AM_TOKEN__LTLIBRARIES, "LTLIBRARIES", "lib"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_PRIMARY | ANJUTA_PROJECT_STATICLIB, N_("Static Library (Libtool)"), "application/x-archive", "autotools-project-target-edit"}, AM_TOKEN__LIBRARIES, "LIBRARIES", "lib"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_PRIMARY | ANJUTA_PROJECT_PROGRAM | ANJUTA_PROJECT_EXECUTABLE, N_("Program"), "application/x-executable", "autotools-project-target-edit"}, AM_TOKEN__PROGRAMS, "PROGRAMS", "bin"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_PYTHON, N_("Python Module"), "application/x-python", 
"autotools-project-target-edit"}, AM_TOKEN__PYTHON, "PYTHON", "python"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_JAVA, N_("Java Module"), "application/x-java", "autotools-project-target-edit"}, AM_TOKEN__JAVA, "JAVA", "java"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_LISP, N_("Lisp Module"), "text/plain", "autotools-project-target-edit"}, AM_TOKEN__LISP, "LISP", "lisp"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_HEADER, N_("Header Files"), "text/x-chdr", "autotools-project-target-edit"}, AM_TOKEN__HEADERS, "HEADERS", "include"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_MAN, N_("Man Documentation"), "text/x-troff-man", "autotools-project-target-edit"}, AM_TOKEN__MANS, "MANS", "man"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_INFO, N_("Info Documentation"), "application/x-tex-info", "autotools-project-target-edit"}, AM_TOKEN__TEXINFOS, "TEXINFOS", "info"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_DATA, N_("Miscellaneous Data"), "application/octet-stream", "autotools-project-target-edit"}, AM_TOKEN__DATA, "DATA", "data"}, {{ANJUTA_PROJECT_TARGET | ANJUTA_PROJECT_SCRIPT, N_("Script"), "text/x-shellscript", "autotools-project-target-edit"}, AM_TOKEN__SCRIPTS, "SCRIPTS", "bin"}, {{ANJUTA_PROJECT_MODULE, N_("Module"), "", NULL}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_PACKAGE, N_("Package"), "", NULL}, ANJUTA_TOKEN_NONE, NULL, NULL}, {{ANJUTA_PROJECT_UNKNOWN, NULL, NULL, NULL}, ANJUTA_TOKEN_NONE, NULL, NULL} }; /* Types *---------------------------------------------------------------------------*/ static void iproject_iface_init(IAnjutaProjectIface* iface); G_DEFINE_DYNAMIC_TYPE_EXTENDED (AmpProject, amp_project, AMP_TYPE_ROOT_NODE, 0, G_IMPLEMENT_INTERFACE_DYNAMIC (IANJUTA_TYPE_PROJECT, iproject_iface_init)); /* Properties *---------------------------------------------------------------------------*/ /* ----- Standard GObject types and variables ----- */ enum { PROP_0, PROP_PROJECT_DIR }; static GObject *parent_class; /* Helper functions 
*---------------------------------------------------------------------------*/ /* Work even if file is not a descendant of parent */ gchar* get_relative_path (GFile *parent, GFile *file) { gchar *relative; relative = g_file_get_relative_path (parent, file); if (relative == NULL) { if (g_file_equal (parent, file)) { relative = g_strdup ("."); } else { GFile *grand_parent = g_file_get_parent (parent); gint level; gchar *grand_relative; gchar *ptr; gsize len; for (level = 1; !g_file_has_prefix (file, grand_parent); level++) { GFile *next = g_file_get_parent (grand_parent); g_object_unref (grand_parent); grand_parent = next; } grand_relative = g_file_get_relative_path (grand_parent, file); g_object_unref (grand_parent); len = strlen (grand_relative); relative = g_new (gchar, len + level * 3 + 1); ptr = relative; for (; level; level--) { memcpy(ptr, ".." G_DIR_SEPARATOR_S, 3); ptr += 3; } memcpy (ptr, grand_relative, len + 1); g_free (grand_relative); } } return relative; } GFileType file_type (GFile *file, const gchar *filename) { GFile *child; GFileInfo *info; GFileType type; child = filename != NULL ? 
	        g_file_get_child (file, filename) :
	        g_object_ref (file);
	info = g_file_query_info (child, G_FILE_ATTRIBUTE_STANDARD_TYPE, G_FILE_QUERY_INFO_NONE, NULL, NULL);
	if (info != NULL)
	{
		type = g_file_info_get_file_type (info);
		g_object_unref (info);
	}
	else
	{
		/* Query failed: file is missing or unreadable */
		type = G_FILE_TYPE_UNKNOWN;
	}
	g_object_unref (child);

	return type;
}

/* Automake parsing function
 *---------------------------------------------------------------------------*/

/* Canonicalize a name the automake way: every character which is not
 * alphanumeric (keeping '@' used by substitutions) becomes '_'.
 * Returns a newly allocated string. */
gchar*
canonicalize_automake_variable (const gchar *name)
{
	gchar *canon_name = g_strdup (name);
	gchar *ptr;

	for (ptr = canon_name; *ptr != '\0'; ptr++)
	{
		if (!g_ascii_isalnum (*ptr) && (*ptr != '@'))
		{
			*ptr = '_';
		}
	}

	return canon_name;
}

/* Split an automake variable name like "nobase_dist_pkg_DATA" into flags,
 * a module part ("pkg") and a primary ("DATA").
 * NOTE(review): name is modified in place and *module / *primary point into
 * it — presumably callers pass a private copy; confirm before reuse. */
gboolean
split_automake_variable (gchar *name, gint *flags, gchar **module, gchar **primary)
{
	gboolean res = FALSE;
	GRegex *regex;
	GMatchInfo *match_info;
	gint start_pos;
	gint end_pos;

	/* Match groups: 1 = nobase/notrans, 2 = dist/nodist,
	 * 3 = noinst/check/man, 4 = module, 5 = primary.
	 * Continues on the next chunk. */
	regex = g_regex_new ("(nobase_|notrans_)?"
	                     "(dist_|nodist_)?"
	                     "(noinst_|check_|man_|man[0-9al]_)?"
	                     "(.*_)?"
	                     "([^_]+)",
	                     G_REGEX_ANCHORED, G_REGEX_MATCH_ANCHORED, NULL);

	if (!g_regex_match (regex, name, G_REGEX_MATCH_ANCHORED, &match_info)) goto out;

	if (flags)
	{
		*flags = 0;

		/* Group 1: "nobase_" or "notrans_", distinguished by the third
		 * character ('b' vs 't'). */
		g_match_info_fetch_pos (match_info, 1, &start_pos, &end_pos);
		if (start_pos >= 0)
		{
			if (*(name + start_pos + 2) == 'b') *flags |= AM_TARGET_NOBASE;
			if (*(name + start_pos + 2) == 't') *flags |= AM_TARGET_NOTRANS;
		}

		/* Group 2: "dist_" or "nodist_" */
		g_match_info_fetch_pos (match_info, 2, &start_pos, &end_pos);
		if (start_pos >= 0)
		{
			if (*(name + start_pos) == 'd') *flags |= AM_TARGET_DIST;
			if (*(name + start_pos) == 'n') *flags |= AM_TARGET_NODIST;
		}

		/* Group 3: "noinst_", "check_", "man_" or "manX_" */
		g_match_info_fetch_pos (match_info, 3, &start_pos, &end_pos);
		if (start_pos >= 0)
		{
			if (*(name + start_pos) == 'n') *flags |= AM_TARGET_NOINST;
			if (*(name + start_pos) == 'c') *flags |= AM_TARGET_CHECK;
			if (*(name + start_pos) == 'm')
			{
				/* For "manX_" the section character just before the '_' is
				 * packed into bits 7..11 of the flags (low 5 bits of its
				 * ASCII code); a plain "man_" prefix (section char is the
				 * 'n' of "man") stores no section. */
				gchar section = *(name + end_pos - 1);

				*flags |= AM_TARGET_MAN;
				if (section != 'n') *flags |= (section & 0x1F) << 7;
			}
		}
	}

	if (module)
	{
		/* Group 4: module part; the trailing '_' is overwritten with a NUL
		 * so *module points at a string inside name (modified in place). */
		g_match_info_fetch_pos (match_info, 4, &start_pos, &end_pos);
		if (start_pos >= 0)
		{
			*module = name + start_pos;
			*(name + end_pos - 1) = '\0';
		}
		else
		{
			*module = NULL;
		}
	}

	if (primary)
	{
		/* Group 5: primary (DATA, PROGRAMS, ...), also pointing into name */
		g_match_info_fetch_pos (match_info, 5, &start_pos, &end_pos);
		if (start_pos >= 0)
		{
			*primary = name + start_pos;
		}
		else
		{
			*primary = NULL;
		}
	}
	res = TRUE;

out:
	g_match_info_unref (match_info);
	g_regex_unref (regex);

	return res;
}

/* Compute the default tarball name used by AC_INIT from the package name:
 * strip a leading "GNU ", lower-case it, and map every character outside
 * [a-z0-9] to '-'. Returns a newly allocated string, or NULL. */
static gchar*
ac_init_default_tarname (const gchar *name)
{
	gchar *tarname;

	if (name == NULL) return NULL;

	/* Remove GNU prefix */
	if (strncmp (name, "GNU ", 4) == 0) name += 4;
	tarname = g_ascii_strdown (name, -1);
	g_strcanon (tarname, "abcdefghijklmnopqrstuvwxyz0123456789", '-');

	return tarname;
}

/* Config file objects
 *---------------------------------------------------------------------------*/

/* Allocate a config-file entry for pathname (resolved against project_root),
 * remembering the configure token that declared it.
 * Continues on the next chunk. */
static AmpConfigFile*
amp_config_file_new (const gchar *pathname, GFile *project_root, AnjutaToken *token)
{
	AmpConfigFile *config;

	g_return_val_if_fail ((pathname != NULL) && (project_root != NULL), NULL);

	config =
	       g_slice_new0(AmpConfigFile);
	config->file = g_file_resolve_relative_path (project_root, pathname);
	config->token = token;

	return config;
}

/* Release a config-file entry created by amp_config_file_new() */
static void
amp_config_file_free (AmpConfigFile *config)
{
	if (config)
	{
		g_object_unref (config->file);
		g_slice_free (AmpConfigFile, config);
	}
}

/* Drop the parsed configure data (token file and token tree) of the project */
static void
amp_project_clear (AmpProject *project)
{
	if (project->configure_file != NULL) anjuta_token_file_free (project->configure_file);
	project->configure_file = NULL;
	if (project->configure_token) anjuta_token_free (project->configure_token);
	project->configure_token = NULL;
}

/* GFileMonitor callback: forward relevant changes of the configure file as a
 * "file-changed" signal on the project. */
static void
on_project_monitor_changed (GFileMonitor *monitor,
                            GFile *file,
                            GFile *other_file,
                            GFileMonitorEvent event_type,
                            gpointer data)
{
	AmpProject *project = AMP_PROJECT (data);

	switch (event_type) {
		case G_FILE_MONITOR_EVENT_CHANGES_DONE_HINT:
		case G_FILE_MONITOR_EVENT_CHANGED:
		case G_FILE_MONITOR_EVENT_DELETED:
			/* project can be NULL, if the node is a dummy node because the
			 * original one is reloaded. */
			g_signal_emit_by_name (G_OBJECT (project), "file-changed", data);
			break;
		default:
			break;
	}
}

/* Replace the project's configure script (configure may be NULL to just drop
 * it). Takes a new reference on configure, installs a file monitor, and
 * returns the new token file, which stays owned by the project. */
AnjutaTokenFile*
amp_project_set_configure (AmpProject *project, GFile *configure)
{
	/* Release the previous configure file, token file and monitor */
	if (project->configure != NULL) g_object_unref (project->configure);
	if (project->configure_file != NULL) anjuta_token_file_free (project->configure_file);
	if (project->monitor) g_object_unref (project->monitor);
	if (configure != NULL)
	{
		project->configure_file = anjuta_token_file_new (configure);
		project->configure = g_object_ref (configure);
		project->monitor = g_file_monitor_file (configure, G_FILE_MONITOR_NONE, NULL, NULL);
		if (project->monitor != NULL)
		{
			g_signal_connect (G_OBJECT (project->monitor), "changed", G_CALLBACK (on_project_monitor_changed), project);
		}
	}
	else
	{
		project->configure_file = NULL;
		project->configure = NULL;
		project->monitor = NULL;
	}

	return project->configure_file;
}

/* Write a modified token back into the configure token file.
 * Continues on the next chunk. */
gboolean
amp_project_update_configure (AmpProject *project, AnjutaToken *token)
{
	return anjuta_token_file_update
	       (project->configure_file, token);
}

/* Return the token tree of the parsed configure script (owned by project) */
AnjutaToken*
amp_project_get_configure_token (AmpProject *project)
{
	return project->configure_token;
}

/* Return the configure token which declared file as an output config file,
 * or NULL when file is not a known config file. */
AnjutaToken *
amp_project_get_config_token (AmpProject *project, GFile *file)
{
	AmpConfigFile *config;

	config = g_hash_table_lookup (project->configs, file);

	return config != NULL ? config->token : NULL;
}

/* Weak-reference callback: a token file object died, drop it from the
 * project's file list. */
static void
remove_config_file (gpointer data, GObject *object)
{
	AmpProject *project = (AmpProject *)data;

	g_return_if_fail (project->files != NULL);

	project->files = g_list_remove (project->files, object);
}

/* Move the freshly reloaded state of new_project into project, swapping the
 * shortcut tables and list styles so new_project can be destroyed afterwards.
 * Continues on the next chunk. */
void
amp_project_update_root (AmpProject *project, AmpProject *new_project)
{
	GHashTable *hash;
	GList *list;
	AnjutaTokenStyle *style;

	/* Move configure file, its token file, token tree and file monitor */
	if (project->configure != NULL) g_object_unref (project->configure);
	if (project->configure_file != NULL) anjuta_token_file_free (project->configure_file);
	if (project->monitor) g_object_unref (project->monitor);
	project->configure = new_project->configure;
	if (project->configure != NULL)
	{
		project->monitor = g_file_monitor_file (project->configure, G_FILE_MONITOR_NONE, NULL, NULL);
		if (project->monitor != NULL)
		{
			g_signal_connect (G_OBJECT (project->monitor), "changed", G_CALLBACK (on_project_monitor_changed), project);
		}
	}
	else
	{
		project->monitor = NULL;
	}
	new_project->configure = NULL;

	project->configure_file = new_project->configure_file;
	new_project->configure_file = NULL;
	project->configure_token = new_project->configure_token;
	new_project->configure_token = NULL;

	/* Swap the group shortcut table */
	hash = project->groups;
	project->groups = new_project->groups;
	new_project->groups = hash;

	/* Swap the file lists and retarget each weak reference so that every
	 * list entry notifies the project that now owns it. */
	list = project->files;
	project->files = new_project->files;
	new_project->files = list;
	for (list = project->files; list != NULL; list = g_list_next (list))
	{
		GObject *tfile = (GObject *)list->data;

		g_object_weak_unref (tfile, remove_config_file, new_project);
		g_object_weak_ref (tfile, remove_config_file, project);
	}
	for (list = new_project->files; list != NULL; list = g_list_next (list))
	{
		GObject *tfile = (GObject *)list->data;
		g_object_weak_unref (tfile, remove_config_file, project);
		g_object_weak_ref (tfile, remove_config_file, new_project);
	}

	/* Swap the config-file table */
	hash = project->configs;
	project->configs = new_project->configs;
	new_project->configs = hash;

	/* Swap the token list styles */
	style = project->ac_space_list;
	project->ac_space_list = new_project->ac_space_list;
	new_project->ac_space_list = style;
	style = project->am_space_list;
	project->am_space_list = new_project->am_space_list;
	new_project->am_space_list = style;
	style = project->arg_list;
	project->arg_list = new_project->arg_list;
	new_project->arg_list = style;

	AMP_NODE_CLASS (parent_class)->update (AMP_NODE (project), AMP_NODE (new_project));
}

/* Target objects
 *---------------------------------------------------------------------------*/

/* Traversal callback: match a target node by exact name. On entry data
 * points to the searched name; on success it receives the found node. */
static gboolean
find_target (AnjutaProjectNode *node, gpointer data)
{
	if (anjuta_project_node_get_node_type (node) == ANJUTA_PROJECT_TARGET)
	{
		if (strcmp (anjuta_project_node_get_name (node), *(gchar **)data) == 0)
		{
			/* Find target, return node value in pointer */
			*(AnjutaProjectNode **)data = node;

			return TRUE;
		}
	}

	return FALSE;
}

/* Traversal callback: match a target node by its canonicalized automake name
 * (see canonicalize_automake_variable). On entry data points to the searched
 * canonical name; on success it receives the found node. */
static gboolean
find_canonical_target (AnjutaProjectNode *node, gpointer data)
{
	if (anjuta_project_node_get_node_type (node) == ANJUTA_PROJECT_TARGET)
	{
		gchar *canon_name = canonicalize_automake_variable (anjuta_project_node_get_name (node));
		DEBUG_PRINT ("compare canon %s vs %s node %p", canon_name, *(gchar **)data, node);
		if (strcmp (canon_name, *(gchar **)data) == 0)
		{
			/* Find target, return node value in pointer */
			*(AnjutaProjectNode **)data = node;
			g_free (canon_name);

			return TRUE;
		}
		g_free (canon_name);
	}

	return FALSE;
}

/*
 * ---------------- Data structures management
 */

/* Load project properties from a configure macro (AC_INIT and friends):
 * every property info whose token type matches the macro is instantiated
 * from the macro arguments. Continues on the next chunk. */
void
amp_project_load_properties (AmpProject *project, AnjutaToken *macro, AnjutaToken *args)
{
	GList *item;
	gint type = anjuta_token_get_type (macro);

	for (item = anjuta_project_node_get_properties_info (ANJUTA_PROJECT_NODE (project)); item != NULL; item = g_list_next (item))
	{
		AmpPropertyInfo *info = (AmpPropertyInfo *)item->data;

		if
((info->token_type == type) && (info->flags & AM_PROPERTY_IN_CONFIGURE)) { AnjutaProjectProperty *new_prop; new_prop = anjuta_project_node_get_property (ANJUTA_PROJECT_NODE (project), info->base.id); if ((new_prop != NULL) && (new_prop->info->default_value != new_prop)) { amp_property_free (new_prop); } new_prop = amp_property_new (NULL, info->token_type, info->position, NULL, args); if (info->position >= 0) { /* Each parameter correspond to a different property */ AnjutaToken *arg; arg = anjuta_token_nth_word (args, info->position); g_free (new_prop->value); new_prop->value = anjuta_token_evaluate_name (arg); } else { /* Property value is whole argument */ if (args == NULL) { new_prop->value = g_strdup(" "); } else { AnjutaToken *arg; arg = anjuta_token_nth_word (args, 0); new_prop->value = anjuta_token_evaluate_name (arg); if (new_prop->value == NULL) new_prop->value = g_strdup(" "); } } amp_node_property_add (ANJUTA_PROJECT_NODE (project), new_prop); } } } void amp_project_load_module (AmpProject *project, AnjutaToken *module_token) { AmpAcScanner *scanner = NULL; if (module_token != NULL) { AnjutaToken *arg; AnjutaToken *list; AnjutaToken *item; gchar *value; AmpModuleNode *module; AmpPackageNode *package; gchar *compare; /* Module name */ arg = anjuta_token_first_item (module_token); value = anjuta_token_evaluate (arg); module = amp_module_node_new (value); amp_module_node_add_token (module, module_token); anjuta_project_node_append (ANJUTA_PROJECT_NODE (project), ANJUTA_PROJECT_NODE (module)); /* Package list */ arg = anjuta_token_next_word (arg); if (arg != NULL) { scanner = amp_ac_scanner_new (project); list = amp_ac_scanner_parse_token (scanner, NULL, arg, AC_SPACE_LIST_STATE, NULL, NULL); anjuta_token_free_children (arg); list = anjuta_token_delete_parent (list); anjuta_token_prepend_items (arg, list); amp_ac_scanner_free (scanner); } package = NULL; compare = NULL; for (item = anjuta_token_first_word (arg); item != NULL; item = anjuta_token_next_word 
(item)) { value = anjuta_token_evaluate (item); if (value == NULL) continue; /* Empty value, a comment of a quote by example */ if (*value == '\0') { g_free (value); continue; } if ((package != NULL) && (compare != NULL)) { amp_package_node_set_version (package, compare, value); g_free (value); g_free (compare); package = NULL; compare = NULL; } else if ((package != NULL) && (anjuta_token_get_type (item) == ANJUTA_TOKEN_OPERATOR)) { compare = value; } else { package = amp_package_node_new (value); amp_package_node_add_token (package, item); anjuta_project_node_append (ANJUTA_PROJECT_NODE (module), ANJUTA_PROJECT_NODE (package)); anjuta_project_node_set_state (ANJUTA_PROJECT_NODE (package), ANJUTA_PROJECT_INCOMPLETE); g_free (value); compare = NULL; } } } } void amp_project_load_config (AmpProject *project, AnjutaToken *arg_list) { AmpAcScanner *scanner = NULL; if (arg_list != NULL) { AnjutaToken *arg; AnjutaToken *list; AnjutaToken *item; /* File list */ scanner = amp_ac_scanner_new (project); arg = anjuta_token_first_word (arg_list); list = amp_ac_scanner_parse_token (scanner, NULL, arg, AC_SPACE_LIST_STATE, NULL, NULL); anjuta_token_free_children (arg); list = anjuta_token_delete_parent (list); amp_ac_scanner_free (scanner); /* list can be NULL is there is no argument to AC_OUTPUT or AC_CONFIG_FILES */ if (list != NULL) { anjuta_token_prepend_items (arg, list); for (item = anjuta_token_first_word (arg); item != NULL; item = anjuta_token_next_word (item)) { gchar *value; AmpConfigFile *cfg; value = anjuta_token_evaluate (item); if (value == NULL) continue; cfg = amp_config_file_new (value, anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (project)), item); g_hash_table_replace (project->configs, cfg->file, cfg); g_free (value); } } } } static AnjutaToken* project_load_target (AmpProject *project, AnjutaProjectNode *parent, AnjutaToken *variable, GHashTable *orphan_properties) { AnjutaToken *arg; gchar *install = NULL; gchar *value; gint flags = 0; AmpNodeInfo 
*info = AmpNodeInformations; while (info->base.type != 0) { if (anjuta_token_get_type (variable) == info->token) { break; } info++; } value = anjuta_token_evaluate (anjuta_token_first_word (variable)); split_automake_variable (value, &flags, &install, NULL); amp_group_node_add_token (AMP_GROUP_NODE (parent), variable, AM_GROUP_TARGET); for (arg = anjuta_token_first_word (anjuta_token_last_item (variable)); arg != NULL; arg = anjuta_token_next_word (arg)) { gchar *value; gchar *canon_id; AmpTargetNode *target; AmpTargetNode *orphan; gchar *orig_key; gpointer find; value = anjuta_token_evaluate (arg); /* This happens for variable token which are considered as value */ if (value == NULL) continue; canon_id = canonicalize_automake_variable (value); /* Check if target already exists */ find = value; anjuta_project_node_children_traverse (parent, find_target, &find); if ((gchar *)find != value) { /* Find target */ g_free (canon_id); g_free (value); continue; } /* Create target */ target = amp_target_node_new (value, info->base.type, install, flags); if (target != NULL) { amp_target_node_add_token (target, ANJUTA_TOKEN_ARGUMENT, arg); anjuta_project_node_append (parent, ANJUTA_PROJECT_NODE (target)); DEBUG_PRINT ("create target %p name %s", target, value); /* Check if there are sources or properties availables */ if (g_hash_table_lookup_extended (orphan_properties, canon_id, (gpointer *)&orig_key, (gpointer *)&orphan)) { AnjutaTokenType type; GList *properties; AnjutaProjectNode *child; g_hash_table_steal (orphan_properties, canon_id); /* Copy all token */ for (type = amp_target_node_get_first_token_type (orphan); type != 0; type = amp_target_node_get_next_token_type (orphan, type)) { GList *tokens; tokens = amp_target_node_get_token (orphan, type); for (tokens = g_list_first (tokens); tokens != NULL; tokens = g_list_next (tokens)) { AnjutaToken *token = (AnjutaToken *)tokens->data; amp_target_node_add_token (target, type, token); } } /* Copy all properties */ while 
((properties = anjuta_project_node_get_properties (ANJUTA_PROJECT_NODE (orphan))) != NULL) { AnjutaProjectProperty *prop; prop = (AnjutaProjectProperty *)anjuta_project_node_remove_property (ANJUTA_PROJECT_NODE (orphan), (AnjutaProjectProperty *)properties->data); amp_node_property_add ((AnjutaProjectNode *)target, prop); } /* Copy all sources */ while ((child = anjuta_project_node_first_child (ANJUTA_PROJECT_NODE (orphan))) != NULL) { anjuta_project_node_remove (child); anjuta_project_node_append (ANJUTA_PROJECT_NODE (target), child); g_object_unref (child); } amp_target_changed (target); g_free (orig_key); amp_target_node_free (orphan); } /* Set target properties */ if (flags & AM_TARGET_NOBASE) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 0, "1", arg); if (flags & AM_TARGET_NOTRANS) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 1, "1", arg); if (flags & AM_TARGET_DIST) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 2, "1", arg); if (flags & AM_TARGET_NODIST) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 2, "0", arg); if (flags & AM_TARGET_NOINST) { amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 3, "1", arg); } else if (install != NULL) { gchar *instdir = g_strconcat (install, "dir", NULL); amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 6, instdir, arg); g_free (instdir); } if (flags & AM_TARGET_CHECK) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 4, "1", arg); if (flags & AM_TARGET_MAN) { gchar section[] = "0"; section[0] += (flags >> 7) & 0x1F; amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 4, section, arg); } } g_free (canon_id); g_free (value); } g_free (value); return NULL; } static AnjutaToken* project_load_sources (AmpProject *project, AnjutaProjectNode *group, AnjutaToken *variable, GHashTable *orphan_properties) { AnjutaToken *arg; GFile 
*group_file = g_object_ref (anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (group))); gchar *target_id = NULL; target_id = anjuta_token_evaluate (anjuta_token_first_word (variable)); if (target_id) { gchar *end = strrchr (target_id, '_'); if (end) { *end = '\0'; } } if (target_id) { gpointer find; AnjutaProjectNode *target; find = target_id; DEBUG_PRINT ("search for canonical %s", target_id); anjuta_project_node_children_traverse (group, find_canonical_target, &find); target = (gchar *)find != target_id ? (AnjutaProjectNode *)find : NULL; /* Get orphan buffer if there is no target */ if (target == NULL) { gchar *orig_key; if (g_hash_table_lookup_extended (orphan_properties, target_id, (gpointer *)&orig_key, (gpointer *)&target)) { g_hash_table_steal (orphan_properties, target_id); g_free (orig_key); } else { target = ANJUTA_PROJECT_NODE (amp_target_node_new ("dummy", 0, NULL, 0)); } g_hash_table_insert (orphan_properties, target_id, target); } else { g_free (target_id); } amp_target_node_add_token (AMP_TARGET_NODE (target), AM_TOKEN__SOURCES, variable); for (arg = anjuta_token_first_word (anjuta_token_last_item (variable)); arg != NULL; arg = anjuta_token_next_word (arg)) { gchar *value; AnjutaProjectNode *source; AnjutaProjectNode *parent = target; GFile *src_file; GFileInfo* file_info; value = anjuta_token_evaluate (arg); if (value == NULL) continue; src_file = g_file_get_child (group_file, value); if (project->lang_manager != NULL) { file_info = g_file_query_info (src_file, G_FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE, G_FILE_QUERY_INFO_NONE, NULL, NULL); if (file_info) { gint id = ianjuta_language_get_from_mime_type (project->lang_manager, g_file_info_get_content_type (file_info), NULL); if (id > 0) { const gchar *obj_ext = ianjuta_language_get_make_target (project->lang_manager, id, NULL); if (obj_ext != NULL) { /* Create object node */ gchar *object_name; gchar *basename; gchar *ext; GFile *obj_file; AnjutaProjectNode *object; ext = strrchr (value, '.'); if 
((ext != NULL) && (ext != value)) *ext = '\0'; object_name = g_strconcat (value, obj_ext, NULL); basename = g_path_get_basename (object_name); obj_file = g_file_get_child (group_file, basename); g_free (basename); g_free (object_name); object = amp_object_node_new (obj_file, ANJUTA_PROJECT_PROJECT); g_object_unref (obj_file); anjuta_project_node_append (parent, object); parent = object; } } g_object_unref (file_info); } } /* Create source */ source = amp_source_node_new (src_file, ANJUTA_PROJECT_PROJECT); g_object_unref (src_file); if (source != NULL) { amp_source_node_add_token (AMP_SOURCE_NODE (source), arg); DEBUG_PRINT ("add target child %p", target); /* Add as target child */ anjuta_project_node_append (parent, source); } g_free (value); } amp_target_changed (AMP_TARGET_NODE (target)); } g_object_unref (group_file); return NULL; } static AnjutaToken* project_load_data (AmpProject *project, AnjutaProjectNode *parent, AnjutaToken *variable, GHashTable *orphan_properties, gint data_flags) { gchar *install = NULL; AmpTargetNode *target; gchar *target_id; gpointer find; gint flags = 0; AmpNodeInfo *info = AmpNodeInformations; AnjutaToken *arg; AnjutaToken *list; while (info->base.name != NULL) { if (anjuta_token_get_type (variable) == info->token) { break; } info++; } target_id = anjuta_token_evaluate (anjuta_token_first_word (variable)); split_automake_variable (target_id, &flags, &install, NULL); amp_group_node_add_token (AMP_GROUP_NODE (parent), variable, AM_GROUP_TARGET); /* Check if target already exists */ find = target_id; anjuta_project_node_children_traverse (parent, find_target, &find); if ((gchar *)find == target_id) { /* Create target */ target = amp_target_node_new (target_id, info->base.type, install, flags); if (target != NULL) { anjuta_project_node_append (parent, ANJUTA_PROJECT_NODE (target)); DEBUG_PRINT ("create target %p name %s", target, target_id); } } else { target = AMP_TARGET_NODE (find); } if (target != NULL) { GFile *parent_file = 
g_object_ref (anjuta_project_node_get_file (parent)); amp_target_node_add_token (AMP_TARGET_NODE (target), AM_TOKEN__DATA, variable); list = anjuta_token_last_item (variable); for (arg = anjuta_token_first_word (list); arg != NULL; arg = anjuta_token_next_word (arg)) { gchar *value; AnjutaProjectNode *source; GFile *src_file; value = anjuta_token_evaluate (arg); if (value == NULL) continue; /* Create source */ src_file = g_file_get_child (parent_file, value); source = amp_source_node_new (src_file, ANJUTA_PROJECT_PROJECT | data_flags); g_object_unref (src_file); if (source != NULL) { amp_source_node_add_token (AMP_SOURCE_NODE(source), arg); /* Add as target child */ DEBUG_PRINT ("add target child %p", target); anjuta_project_node_append (ANJUTA_PROJECT_NODE (target), source); } g_free (value); } g_object_unref (parent_file); /* Set target properties */ if (flags & AM_TARGET_NOBASE) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 0, "1", arg); if (flags & AM_TARGET_NOTRANS) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 1, "1", arg); if (flags & AM_TARGET_DIST) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 2, "1", arg); if (flags & AM_TARGET_NODIST) amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 2, "0", arg); if (flags & AM_TARGET_NOINST) { amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 3, "1", arg); } else if (install != NULL) { gchar *instdir = g_strconcat (install, "dir", NULL); amp_node_property_load (ANJUTA_PROJECT_NODE (target), AM_TOKEN__PROGRAMS, 6, instdir, arg); g_free (instdir); } } g_free (target_id); return NULL; } static AnjutaToken* project_load_target_properties (AmpProject *project, AnjutaProjectNode *parent, AnjutaToken *variable, GHashTable *orphan_properties) { gchar *target_id = NULL; target_id = anjuta_token_evaluate (anjuta_token_first_word (variable)); if (target_id) { gchar *end = strrchr (target_id, '_'); if 
(end) { *end = '\0'; } } if (target_id) { gpointer find; gchar *value; AnjutaProjectProperty *prop; AnjutaToken *list; AnjutaTokenType type; find = target_id; DEBUG_PRINT ("search for canonical %s", target_id); anjuta_project_node_children_traverse (parent, find_canonical_target, &find); parent = (gchar *)find != target_id ? (AnjutaProjectNode *)find : NULL; /* Create property */ list = anjuta_token_last_item (variable); type = anjuta_token_get_type (variable); value = anjuta_token_evaluate_name (list); prop = amp_property_new (NULL, type, 0, value, list); if (parent == NULL) { /* Add property to non existing target, create a dummy target */ gchar *orig_key; if (g_hash_table_lookup_extended (orphan_properties, target_id, (gpointer *)&orig_key, (gpointer *)&parent)) { /* Dummy target already created */ g_hash_table_steal (orphan_properties, target_id); g_free (orig_key); } else { /* Create dummy target */ parent = ANJUTA_PROJECT_NODE (amp_target_node_new ("dummy", 0, NULL, 0)); } g_hash_table_insert (orphan_properties, target_id, parent); } else { g_free (target_id); } g_free (value); /* Add property to target */ amp_node_property_add (parent, prop); amp_target_node_add_token (AMP_TARGET_NODE (parent), type, variable); amp_target_changed (AMP_TARGET_NODE (parent)); } return NULL; } static AnjutaToken* project_load_group_properties (AmpProject *project, AnjutaProjectNode *parent, AnjutaToken *variable) { gchar *value; gchar *name; AnjutaProjectProperty *prop; AnjutaToken *list; /* Create property */ list = anjuta_token_last_item (variable); name = anjuta_token_evaluate (anjuta_token_first_word (variable)); value = anjuta_token_evaluate_name (list); prop = amp_property_new (name, anjuta_token_get_type (variable), 0, value, list); amp_node_property_add (parent, prop); g_free (value); g_free (name); return NULL; } static gboolean find_group (AnjutaProjectNode *node, gpointer data) { if (anjuta_project_node_get_node_type (node) == ANJUTA_PROJECT_GROUP) { if (g_file_equal 
	    (anjuta_project_node_get_file (node), (GFile *)data))
		{
			/* Found the group: returning TRUE stops the traversal, and the
			 * traversal function itself returns this node to the caller
			 * (unlike find_target, nothing is written through data). */
			return TRUE;
		}
	}

	return FALSE;
}

/* Load the groups listed in a SUBDIRS or DIST_SUBDIRS variable: for each
 * value, create (or update) a child group node of parent and load its own
 * makefile recursively. dist_only is TRUE for DIST_SUBDIRS entries. */
static void
project_load_subdirs (AmpProject *project, AnjutaToken *list, AnjutaProjectNode *parent, gboolean dist_only)
{
	AnjutaToken *arg;

	for (arg = anjuta_token_first_word (list); arg != NULL; arg = anjuta_token_next_word (arg))
	{
		gchar *value;

		value = anjuta_token_evaluate (arg);
		/* Empty value: a comment or a quote, for example */
		if (value == NULL) continue;

		/* Skip ".": it is a special case, used to define build order */
		if (strcmp (value, ".") != 0)
		{
			GFile *subdir;
			AmpGroupNode *group;

			subdir = g_file_resolve_relative_path (anjuta_project_node_get_file (parent), value);

			/* Look for an already existing group */
			group = AMP_GROUP_NODE (anjuta_project_node_children_traverse (parent, find_group, subdir));

			if (group != NULL)
			{
				/* Already existing group: mark it for build if needed */
				if (!dist_only) amp_group_node_set_dist_only (group, FALSE);
			}
			else
			{
				/* Create a new group */
				group = amp_group_node_new (subdir, value, dist_only);

				/* Group can be NULL if the name is not valid */
				if (group != NULL)
				{
					g_hash_table_insert (project->groups, g_file_get_uri (subdir), group);
					anjuta_project_node_append (parent, ANJUTA_PROJECT_NODE (group));
					amp_node_load (AMP_NODE (group), NULL, project, NULL);
				}
			}
			if (group) amp_group_node_add_token (group, arg, dist_only ?
AM_GROUP_TOKEN_DIST_SUBDIRS : AM_GROUP_TOKEN_SUBDIRS); g_object_unref (subdir); } g_free (value); } } void amp_project_set_am_variable (AmpProject* project, AmpGroupNode* group, AnjutaToken *variable, GHashTable *orphan_properties) { AnjutaToken *list; switch (anjuta_token_get_type (variable)) { case AM_TOKEN_SUBDIRS: list = anjuta_token_last_item (variable); project_load_subdirs (project, list, ANJUTA_PROJECT_NODE (group), FALSE); break; case AM_TOKEN_DIST_SUBDIRS: list = anjuta_token_last_item (variable); project_load_subdirs (project, list, ANJUTA_PROJECT_NODE (group), TRUE); break; case AM_TOKEN__DATA: case AM_TOKEN__HEADERS: case AM_TOKEN__LISP: case AM_TOKEN__MANS: case AM_TOKEN__PYTHON: case AM_TOKEN__JAVA: case AM_TOKEN__TEXINFOS: project_load_data (project, ANJUTA_PROJECT_NODE (group), variable, orphan_properties, 0); break; case AM_TOKEN__SCRIPTS: project_load_data (project, ANJUTA_PROJECT_NODE (group), variable, orphan_properties, ANJUTA_PROJECT_EXECUTABLE); break; case AM_TOKEN__LIBRARIES: case AM_TOKEN__LTLIBRARIES: case AM_TOKEN__PROGRAMS: project_load_target (project, ANJUTA_PROJECT_NODE (group), variable, orphan_properties); break; case AM_TOKEN__SOURCES: project_load_sources (project, ANJUTA_PROJECT_NODE (group), variable, orphan_properties); break; case AM_TOKEN_DIR: case AM_TOKEN__LDFLAGS: case AM_TOKEN__CPPFLAGS: case AM_TOKEN__CFLAGS: case AM_TOKEN__CXXFLAGS: case AM_TOKEN__JAVACFLAGS: case AM_TOKEN__VALAFLAGS: case AM_TOKEN__FCFLAGS: case AM_TOKEN__OBJCFLAGS: case AM_TOKEN__LFLAGS: case AM_TOKEN__YFLAGS: project_load_group_properties (project, ANJUTA_PROJECT_NODE (group), variable); break; case AM_TOKEN_TARGET_LDFLAGS: case AM_TOKEN_TARGET_CPPFLAGS: case AM_TOKEN_TARGET_CFLAGS: case AM_TOKEN_TARGET_CXXFLAGS: case AM_TOKEN_TARGET_JAVACFLAGS: case AM_TOKEN_TARGET_VALAFLAGS: case AM_TOKEN_TARGET_FCFLAGS: case AM_TOKEN_TARGET_OBJCFLAGS: case AM_TOKEN_TARGET_LFLAGS: case AM_TOKEN_TARGET_YFLAGS: case AM_TOKEN_TARGET_DEPENDENCIES: case 
AM_TOKEN_TARGET_LIBADD:
	case AM_TOKEN_TARGET_LDADD:
		project_load_target_properties (project, ANJUTA_PROJECT_NODE (group), variable, orphan_properties);
		break;
	default:
		break;
	}

	/* Keep the autotools variable as a normal variable too */
	amp_group_node_update_variable (group, variable);
}

/* Map function
 *---------------------------------------------------------------------------*/

/* Compare two nodes; returns 0 (equal) when both have the same full type
 * and neither their names nor their files differ. A NULL name or file on
 * either side is treated as a wildcard. Suitable as a GCompareFunc for
 * g_list_find_custom. */
static gint
amp_project_compare_node (AnjutaProjectNode *old_node, AnjutaProjectNode *new_node)
{
	const gchar *name1;
	const gchar *name2;
	GFile *file1;
	GFile *file2;

	name1 = anjuta_project_node_get_name (old_node);
	name2 = anjuta_project_node_get_name (new_node);
	file1 = anjuta_project_node_get_file (old_node);
	file2 = anjuta_project_node_get_file (new_node);

	return (anjuta_project_node_get_full_type (old_node) == anjuta_project_node_get_full_type (new_node)) &&
		((name1 == NULL) || (name2 == NULL) || (strcmp (name1, name2) == 0)) &&
		((file1 == NULL) || (file2 == NULL) || g_file_equal (file1, file2)) ? 0 : 1;
}

/* Recursively fill map with the correspondence between the children of
 * old_node (currently in the tree) and new_node (freshly loaded):
 *  - matching new child  -> old child,
 *  - old child, no match -> NULL value (to be deleted),
 *  - new child, no match -> itself (to be added). */
static void
amp_project_map_children (GHashTable *map, AnjutaProjectNode *old_node, AnjutaProjectNode *new_node)
{
	GList *children = NULL;

	if (new_node != NULL)
	{
		/* Collect the new children, preserving sibling order */
		for (new_node = anjuta_project_node_first_child (new_node); new_node != NULL; new_node = anjuta_project_node_next_sibling (new_node))
		{
			children = g_list_prepend (children, new_node);
		}
		children = g_list_reverse (children);
	}

	for (old_node = anjuta_project_node_first_child (old_node); old_node != NULL; old_node = anjuta_project_node_next_sibling (old_node))
	{
		GList *same;

		same = g_list_find_custom (children, old_node, (GCompareFunc)amp_project_compare_node);
		if (same != NULL)
		{
			/* Add new to old node mapping */
			g_hash_table_insert (map, (AnjutaProjectNode *)same->data, old_node);
			amp_project_map_children (map, old_node, (AnjutaProjectNode *)same->data);
			children = g_list_delete_link (children, same);
		}
		else
		{
			/* Keep old_node to be deleted in the hash table as a key with a NULL
			 * value */
			g_hash_table_insert (map, old_node, NULL);
		}
	}

	/* Add remaining node in hash table */
	for (; children != NULL; children = g_list_delete_link (children, children))
	{
		/* Keep new node without a corresponding old node as a key and an
		 * identical value */
		g_hash_table_insert (map, children->data, children->data);
	}
}

/* Find the correspondence between the new loaded node (new_node) and the
 * original node currently in the tree (old_node) */
static GHashTable *
amp_project_map_node (AnjutaProjectNode *old_node, AnjutaProjectNode *new_node)
{
	GHashTable *map;

	/* NULL key_equal_func means keys are compared by pointer */
	map = g_hash_table_new (g_direct_hash, NULL);

	g_hash_table_insert (map, new_node, old_node);

	amp_project_map_children (map, old_node, new_node);

	return map;
}

/* GHFunc over the map built above: graft the freshly loaded data (key)
 * onto the original node (value) kept in the tree, rewrite its link
 * pointers through the map so they keep referring to surviving nodes,
 * then unlink and free whichever node is now obsolete. */
static void
amp_project_update_node (AnjutaProjectNode *key, AnjutaProjectNode *value, GHashTable *map)
{
	AnjutaProjectNode *old_node = NULL;	/* The node that will be deleted */

	if (value == NULL)
	{
		/* if value is NULL, delete the old node which is the key */
		old_node = key;
	}
	else
	{
		AnjutaProjectNode *node = value;	/* The node that we keep in the tree */
		AnjutaProjectNode *new_node = key;	/* The node with the new data */

		if (new_node && new_node != node)
		{
			GList *properties;

			amp_node_update (AMP_NODE (node), AMP_NODE (new_node));

			/* Swap custom properties */
			properties = node->properties;
			node->properties = new_node->properties;
			new_node->properties = properties;

			if (new_node->parent == NULL)
			{
				/* This is the top loaded node, update only the children */
				node->children = new_node->children;
			}
			else
			{
				/* Other node update all pointers */
				node->parent = new_node->parent;
				node->children = new_node->children;
				node->next = new_node->next;
				node->prev = new_node->prev;
			}

			/* Destroy node with data */
			old_node = new_node;
		}

		/* Update links, using original node address if they stay in the tree */
		new_node = g_hash_table_lookup (map, node->parent);
		if (new_node != NULL) node->parent = new_node;
		new_node = g_hash_table_lookup (map, node->children);
		if (new_node != NULL) node->children = new_node;
		new_node = g_hash_table_lookup (map, node->next);
		if (new_node != NULL) node->next = new_node;
		new_node = g_hash_table_lookup (map, node->prev);
		if (new_node != NULL) node->prev = new_node;
	}

	/* Unlink old node and free it */
	if (old_node != NULL)
	{
		old_node->parent = NULL;
		old_node->children = NULL;
		old_node->next = NULL;
		old_node->prev = NULL;
		g_object_unref (old_node);
	}
}

/* Public functions
 *---------------------------------------------------------------------------*/

/* Return the AmpNodeInfo entry matching type. The loop condition already
 * stops on a matching type; the body only adds the end-of-table
 * (type == 0) sentinel check, so an unknown type yields the sentinel
 * entry instead of running past the table. */
AnjutaProjectNodeInfo *
amp_project_get_type_info (AmpProject *project, AnjutaProjectNodeType type)
{
	AmpNodeInfo *info;

	for (info = AmpNodeInformations; info->base.type != type; info++)
	{
		if ((info->base.type == type) || (info->base.type == 0)) break;
	}

	return (AnjutaProjectNodeInfo *)info;
}

/* Reload the whole project from its root directory: drop the current
 * content, parse configure.ac (or configure.in), then load every makefile
 * recursively via the parent class. Returns FALSE and sets error when the
 * configure script is missing or unparsable. */
static gboolean
amp_project_load_root (AmpProject *project, GError **error)
{
	AmpAcScanner *scanner;
	AnjutaToken *arg;
	GFile *root_file;
	GFile *configure_file;
	AnjutaTokenFile *configure_token_file;
	AnjutaProjectNode *source;
	GError *err = NULL;

	root_file = anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (project));
	DEBUG_PRINT ("reload project %p root file %p", project, root_file);

	/* Unload current project */
	amp_project_unload (project);

	/* Initialize list styles */
	project->ac_space_list = anjuta_token_style_new (NULL, " ", "\n", NULL, 0);
	project->am_space_list = anjuta_token_style_new (NULL, " ", " \\\n\t", NULL, 0);
	project->arg_list = anjuta_token_style_new (NULL, ", ", ", ", ")", 0);

	/* Find configure file, preferring configure.ac over configure.in */
	if (file_type (root_file, "configure.ac") == G_FILE_TYPE_REGULAR)
	{
		configure_file = g_file_get_child (root_file, "configure.ac");
	}
	else if (file_type (root_file, "configure.in") == G_FILE_TYPE_REGULAR)
	{
		configure_file = g_file_get_child (root_file, "configure.in");
	}
	else
	{
		g_set_error (error, IANJUTA_PROJECT_ERROR,
		             IANJUTA_PROJECT_ERROR_DOESNT_EXIST,
		             _("Project doesn't exist or invalid path"));
		return FALSE;
	}

	/* Parse configure */
	configure_token_file = amp_project_set_configure (project, configure_file);
	amp_project_add_file (project, configure_file, configure_token_file);
	source = amp_source_node_new (configure_file, ANJUTA_PROJECT_PROJECT | ANJUTA_PROJECT_FRAME | ANJUTA_PROJECT_READ_ONLY);
	anjuta_project_node_append (ANJUTA_PROJECT_NODE (project), source);
	arg = anjuta_token_file_load (configure_token_file, NULL);
	g_hash_table_remove_all (project->ac_variables);
	scanner = amp_ac_scanner_new (project);
	project->configure_token = amp_ac_scanner_parse_token (scanner, NULL, arg, 0, configure_file, &err);
	amp_ac_scanner_free (scanner);
	if (project->configure_token == NULL)
	{
		if (err != NULL)
		{
			/* Forward the scanner's own message */
			g_set_error_literal (error, IANJUTA_PROJECT_ERROR,
			                     IANJUTA_PROJECT_ERROR_PROJECT_MALFORMED,
			                     err->message);
			g_error_free (err);
		}
		else
		{
			g_set_error (error, IANJUTA_PROJECT_ERROR,
			             IANJUTA_PROJECT_ERROR_PROJECT_MALFORMED,
			             "%s",
			             _("Unable to parse project file"));
		}
		return FALSE;
	}

	/* Load all makefiles recursively */
	if (!AMP_NODE_CLASS (parent_class)->load (AMP_NODE (project), NULL, project, NULL))
	{
		g_set_error (error, IANJUTA_PROJECT_ERROR,
		             IANJUTA_PROJECT_ERROR_DOESNT_EXIST,
		             _("Project doesn't exist or has an invalid path"));
		return FALSE;
	}

	return TRUE;
}

/* Drop every piece of project data: node content, shortcut hash tables,
 * tracked token files and the list styles. NOTE(review): the style lists
 * are freed but not reset to NULL here; amp_project_load_root reassigns
 * them right after, but unloading twice in a row would double-free —
 * confirm no other caller does that. */
void
amp_project_unload (AmpProject *project)
{
	/* project data */
	amp_project_clear (project);

	/* shortcut hash tables */
	if (project->groups) g_hash_table_remove_all (project->groups);
	if (project->files != NULL)
	{
		GList *list;

		for (list = project->files; list != NULL; list = g_list_delete_link (list, list))
		{
			/* Drop the weak reference installed by amp_project_add_file */
			g_object_weak_unref (G_OBJECT (list->data), remove_config_file, project);
		}
		project->files = NULL;
	}
	if (project->configs) g_hash_table_remove_all (project->configs);

	/* List styles */
	if (project->am_space_list) anjuta_token_style_free (project->am_space_list);
	if (project->ac_space_list) anjuta_token_style_free (project->ac_space_list);
	if (project->arg_list) anjuta_token_style_free (project->arg_list);
}

/* A project counts as loaded when no load job is pending */
gboolean
amp_project_is_loaded (AmpProject *project)
{
	return project->loading == 0;
}

/* Check whether file is a directory that looks like an autotools project
 * (at least one known makefile plus a configure script). Returns
 * IANJUTA_PROJECT_PROBE_PROJECT_FILES on success, 0 otherwise. */
gint
amp_project_probe (GFile *file, GError **error)
{
	gint probe;
	gboolean dir;

	dir = (file_type (file, NULL) == G_FILE_TYPE_DIRECTORY);
	if (!dir)
	{
		g_set_error (error, IANJUTA_PROJECT_ERROR,
		             IANJUTA_PROJECT_ERROR_DOESNT_EXIST,
		             _("Project doesn't exist or invalid path"));
	}

	probe = dir;
	if (probe)
	{
		const gchar **makefile;

		/* Look for makefiles */
		probe = FALSE;
		for (makefile = valid_am_makefiles; *makefile != NULL; makefile++)
		{
			if (file_type (file, *makefile) == G_FILE_TYPE_REGULAR)
			{
				probe = TRUE;
				break;
			}
		}

		if (probe)
		{
			probe = ((file_type (file, "configure.ac") == G_FILE_TYPE_REGULAR) ||
			         (file_type (file, "configure.in") == G_FILE_TYPE_REGULAR));
		}
	}

	return probe ? IANJUTA_PROJECT_PROBE_PROJECT_FILES : 0;
}

/* Resolve the file location of token by asking each tracked token file in
 * turn; returns TRUE on the first hit. */
gboolean
amp_project_get_token_location (AmpProject *project, AnjutaTokenFileLocation *location, AnjutaToken *token)
{
	GList *list;

	for (list = project->files; list != NULL; list = g_list_next (list))
	{
		if (anjuta_token_file_get_token_location ((AnjutaTokenFile *)list->data, location, token))
		{
			return TRUE;
		}
	}

	return FALSE;
}

/* Remove a group node: strip every configure, SUBDIRS and DIST_SUBDIRS
 * token referencing it, then free the node. No-op for non-group nodes. */
void
amp_project_remove_group (AmpProject *project, AmpGroupNode *group, GError **error)
{
	GList *token_list;

	if (anjuta_project_node_get_node_type (ANJUTA_PROJECT_NODE (group)) != ANJUTA_PROJECT_GROUP) return;

	for (token_list = amp_group_node_get_token (group, AM_GROUP_TOKEN_CONFIGURE); token_list != NULL; token_list = g_list_next (token_list))
	{
		anjuta_token_remove_word ((AnjutaToken *)token_list->data);
	}
	for (token_list = amp_group_node_get_token (group, AM_GROUP_TOKEN_SUBDIRS); token_list != NULL; token_list = g_list_next (token_list))
	{
		anjuta_token_remove_word ((AnjutaToken *)token_list->data);
	}
	for (token_list = amp_group_node_get_token (group, AM_GROUP_TOKEN_DIST_SUBDIRS); token_list != NULL; token_list = g_list_next (token_list))
	{
		anjuta_token_remove_word ((AnjutaToken *)token_list->data);
	}

	amp_group_node_free (group);
}

/* Remove a source node: drop its makefile token and free it.
 * No-op for non-source nodes. */
void
amp_project_remove_source (AmpProject *project, AmpSourceNode *source, GError **error)
{
	if
(anjuta_project_node_get_node_type (ANJUTA_PROJECT_NODE (source)) != ANJUTA_PROJECT_SOURCE) return; anjuta_token_remove_word (amp_source_node_get_token (source)); amp_source_node_free (source); } const GList * amp_project_get_node_info (AmpProject *project, GError **error) { static GList *info_list = NULL; if (info_list == NULL) { AmpNodeInfo *node; for (node = AmpNodeInformations; node->base.type != 0; node++) { info_list = g_list_prepend (info_list, node); } info_list = g_list_reverse (info_list); } return info_list; } /* Public functions *---------------------------------------------------------------------------*/ typedef struct _AmpMovePacket { AmpProject *project; GFile *old_root_file; GFile *new_root_file; } AmpMovePacket; static void foreach_node_move (AnjutaProjectNode *g_node, gpointer data) { AmpProject *project = ((AmpMovePacket *)data)->project; GFile *old_root_file = ((AmpMovePacket *)data)->old_root_file; GFile *new_root_file = ((AmpMovePacket *)data)->new_root_file; gchar *relative; GFile *new_file; switch (anjuta_project_node_get_node_type (g_node)) { case ANJUTA_PROJECT_GROUP: relative = get_relative_path (old_root_file, anjuta_project_node_get_file (g_node)); new_file = g_file_resolve_relative_path (new_root_file, relative); g_free (relative); amp_group_node_set_file (AMP_GROUP_NODE (g_node), new_file); g_object_unref (new_file); g_hash_table_insert (project->groups, g_file_get_uri (new_file), g_node); break; case ANJUTA_PROJECT_SOURCE: relative = get_relative_path (old_root_file, anjuta_project_node_get_file (g_node)); new_file = g_file_resolve_relative_path (new_root_file, relative); g_free (relative); amp_source_node_set_file (AMP_SOURCE_NODE (g_node), new_file); g_object_unref (new_file); break; default: break; } } gboolean amp_project_move (AmpProject *project, const gchar *path) { GFile *new_file; gchar *relative; GList *list; gpointer key; AmpConfigFile *cfg; GHashTable* old_hash; GHashTableIter iter; AmpMovePacket packet= {project, NULL}; 
	/* Change project root directory */
	packet.old_root_file = g_object_ref (anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (project)));
	packet.new_root_file = g_file_new_for_path (path);

	/* Change project root directory in groups */
	old_hash = project->groups;
	project->groups = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
	anjuta_project_node_foreach (ANJUTA_PROJECT_NODE (project), G_POST_ORDER, foreach_node_move, &packet);
	g_hash_table_destroy (old_hash);

	/* Change all files */
	for (list = project->files; list != NULL; list = g_list_next (list))
	{
		AnjutaTokenFile *tfile = (AnjutaTokenFile *)list->data;

		relative = get_relative_path (packet.old_root_file, anjuta_token_file_get_file (tfile));
		new_file = g_file_resolve_relative_path (packet.new_root_file, relative);
		g_free (relative);
		/* NOTE(review): new_file is not unreffed here — presumably
		 * anjuta_token_file_move takes ownership; confirm, else this leaks. */
		anjuta_token_file_move (tfile, new_file);
	}

	/* Change all configs */
	old_hash = project->configs;
	project->configs = g_hash_table_new_full (g_file_hash, (GEqualFunc)g_file_equal, NULL, (GDestroyNotify)amp_config_file_free);
	g_hash_table_iter_init (&iter, old_hash);
	while (g_hash_table_iter_next (&iter, &key, (gpointer *)&cfg))
	{
		relative = get_relative_path (packet.old_root_file, cfg->file);
		new_file = g_file_resolve_relative_path (packet.new_root_file, relative);
		g_free (relative);
		g_object_unref (cfg->file);
		cfg->file = new_file;
		g_hash_table_insert (project->configs, new_file, cfg);
	}
	/* Steal the values: they have been re-inserted into the new table, so
	 * the old table must not run amp_config_file_free on them */
	g_hash_table_steal_all (old_hash);
	g_hash_table_destroy (old_hash);

	g_object_unref (packet.old_root_file);
	g_object_unref (packet.new_root_file);

	return TRUE;
}

/* Dump file content of corresponding node.
 * NOTE(review): ok is never set to TRUE, so this always returns FALSE;
 * looks like a debugging helper whose return value is not used — confirm. */
gboolean
amp_project_dump (AmpProject *project, AnjutaProjectNode *node, AmpFileType type)
{
	gboolean ok = FALSE;

	if (anjuta_project_node_get_node_type (node) == ANJUTA_PROJECT_GROUP)
	{
		switch (type)
		{
		case DUMP_MAKEFILE:
			anjuta_token_dump (amp_group_node_get_makefile_token (AMP_GROUP_NODE (node)));
			break;
		case DUMP_CONFIGURE:
			anjuta_token_dump (AMP_PROJECT (node)->configure_token);
			break;
		default:
			break;
		}
	}

	return ok;
}

/* Create a new project object rooted at file; language is an optional
 * language manager that gets an extra reference when provided. */
AmpProject *
amp_project_new (GFile *file, IAnjutaLanguage *language, GError **error)
{
	AmpProject *project;
	GFile *new_file;

	project = AMP_PROJECT (g_object_new (AMP_TYPE_PROJECT, NULL));

	new_file = g_file_dup (file);
	amp_root_node_set_file (AMP_ROOT_NODE (project), new_file);
	g_object_unref (new_file);

	project->lang_manager = (language != NULL) ? g_object_ref (language) : NULL;

	return project;
}

/* Project access functions
 *---------------------------------------------------------------------------*/

/* The project object is itself the root node */
AmpProject *
amp_project_get_root (AmpProject *project)
{
	return AMP_PROJECT (project);
}

/* Look up a group node by its URI in the shortcut hash table */
AmpGroupNode *
amp_project_get_group (AmpProject *project, const gchar *id)
{
	return (AmpGroupNode *)g_hash_table_lookup (project->groups, id);
}

/* Decode a target node from its identifier; the id is a base64-encoded
 * copy of the node pointer itself. */
AmpTargetNode *
amp_project_get_target (AmpProject *project, const gchar *id)
{
	AmpTargetNode **buffer;
	AmpTargetNode *target;
	gsize dummy;

	buffer = (AmpTargetNode **)g_base64_decode (id, &dummy);
	target = *buffer;
	g_free (buffer);

	return target;
}

/* Decode a source node from its identifier; same encoding as targets */
AmpSourceNode *
amp_project_get_source (AmpProject *project, const gchar *id)
{
	AmpSourceNode **buffer;
	AmpSourceNode *source;
	gsize dummy;

	buffer = (AmpSourceNode **)g_base64_decode (id, &dummy);
	source = *buffer;
	g_free (buffer);

	return source;
}

/* Return the project root URI as a newly allocated string */
gchar *
amp_project_get_uri (AmpProject *project)
{
	g_return_val_if_fail (project != NULL, NULL);

	return g_file_get_uri (anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (project)));
}

/* Return the project root file (no new reference) */
GFile*
amp_project_get_file (AmpProject *project)
{
	g_return_val_if_fail (project != NULL, NULL);

	return anjuta_project_node_get_file (ANJUTA_PROJECT_NODE (project));
}

/* Track a token file; the weak reference lets remove_config_file drop it
 * from the list when the token file is finalized elsewhere. */
void
amp_project_add_file (AmpProject *project, GFile *file, AnjutaTokenFile* token)
{
	project->files = g_list_prepend (project->files, token);
	g_object_weak_ref (G_OBJECT (token), remove_config_file, project);
}

/* Register an AC_SUBST-style variable token under name.
 * NOTE(review): the const is cast away and amp_project_init installs
 * g_free as the key destroy function, so the table will eventually free
 * this string — confirm callers hand over ownership of name. */
void
amp_project_add_subst_variable (AmpProject *project, const gchar *name, AnjutaToken *value)
{
	g_hash_table_insert (project->ac_variables, (gchar
*)name, value);
}

/* Look up a previously registered AC_SUBST-style variable token */
AnjutaToken *
amp_project_get_subst_variable_token (AmpProject *project, const gchar *name)
{
	return g_hash_table_lookup (project->ac_variables, name);
}

/* The project is busy while its command queue has pending jobs */
gboolean
amp_project_is_busy (AmpProject *project)
{
	if (project->queue == NULL) return FALSE;

	return pm_command_queue_is_busy (project->queue);
}

/* Worker thread
 *---------------------------------------------------------------------------*/

/* Load job, setup phase: remember the node's parent and create a proxy
 * copy that the worker will load into, keeping the tree usable meanwhile. */
static gboolean
amp_load_setup (PmJob *job)
{
	//anjuta_project_node_check (job->node);
	pm_job_set_parent (job, anjuta_project_node_parent (job->node));
	job->proxy = ANJUTA_PROJECT_NODE (amp_node_copy (AMP_NODE (job->node)));

	return TRUE;
}

/* Load job, work phase: load the project data into the proxy node */
static gboolean
amp_load_work (PmJob *job)
{
	return amp_node_load (AMP_NODE (job->proxy), AMP_NODE (job->parent), AMP_PROJECT (job->user_data), &job->error);
}

/* Load job, completion phase: map the proxy tree back onto the original
 * nodes via amp_project_update_node, free obsolete nodes, decrement the
 * loading counter and emit "node-loaded". */
static gboolean
amp_load_complete (PmJob *job)
{
	GHashTable *map;
	//static GTimer *timer = NULL;

	g_return_val_if_fail (job->proxy != NULL, FALSE);

	//anjuta_project_node_check (job->node);
	/*if (timer == NULL)
	{
		timer = g_timer_new ();
	}
	else
	{
		g_timer_continue (timer);
	}*/
	map = amp_project_map_node (job->node, job->proxy);
	g_object_ref (job->proxy);
	job->proxy->parent = NULL;	// Mark loaded top node
	g_hash_table_foreach (map, (GHFunc)amp_project_update_node, map);
	//anjuta_project_node_check (job->node);
	g_hash_table_destroy (map);
	g_object_unref (job->proxy);
	job->proxy = NULL;
	AMP_PROJECT (job->user_data)->loading--;
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-loaded", job->node, job->error);
	//g_timer_stop (timer);
	//g_message ("amp_load_complete completed in %g", g_timer_elapsed (timer, NULL));

	return TRUE;
}

static PmCommandWork amp_load_job = {amp_load_setup, amp_load_work, amp_load_complete};

/* Save job, setup phase: nothing to prepare */
static gboolean
amp_save_setup (PmJob *job)
{
	return TRUE;
}

/* Save job, work phase */
static gboolean
amp_save_work (PmJob *job)
{
	/* It is difficult to save only a particular node, so the whole project is saved */
	amp_node_save (AMP_NODE (job->user_data), NULL, AMP_PROJECT (job->user_data), &job->error);

	return TRUE;
}

/* Save job, completion phase: notify listeners */
static gboolean
amp_save_complete (PmJob *job)
{
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-saved", job->node, job->error);

	return TRUE;
}

static PmCommandWork amp_save_job = {amp_save_setup, amp_save_work, amp_save_complete};

/* Add job, setup phase: insert the new node before the given sibling */
static gboolean
amp_add_before_setup (PmJob *job)
{
	/* If add is called to add the root group, the node is already existing */
	if (job->parent != job->node)
		anjuta_project_node_insert_before (job->parent, job->sibling, job->node);

	return TRUE;
}

/* Add job, setup phase: insert the new node after the given sibling */
static gboolean
amp_add_after_setup (PmJob *job)
{
	/* If add is called to add the root group, the node is already existing */
	if (job->parent != job->node)
		anjuta_project_node_insert_after (job->parent, job->sibling, job->node);

	return TRUE;
}

/* Add job, work phase: write the node into the build files, then apply
 * each of its properties to configure.ac or the makefile as flagged. */
static gboolean
amp_add_work (PmJob *job)
{
	AmpNode *parent = AMP_NODE (job->parent);
	gboolean ok;

	ok = amp_node_write (AMP_NODE (job->node), parent, AMP_PROJECT (job->user_data), &job->error);

	/* Add new node properties if existing */
	if (ok)
	{
		GList *item;

		for (item = anjuta_project_node_get_properties (ANJUTA_PROJECT_NODE (job->node)); item != NULL; item = g_list_next (item))
		{
			AnjutaProjectProperty *property = (AnjutaProjectProperty *)item->data;
			gint flags;

			flags = ((AmpPropertyInfo *)property->info)->flags;
			if (flags & AM_PROPERTY_IN_CONFIGURE)
			{
				ok = ok && amp_project_update_ac_property (AMP_PROJECT (job->user_data), property);
			}
			else if (flags & AM_PROPERTY_IN_MAKEFILE)
			{
				/* Only read-write properties are written back */
				if (((AnjutaProjectPropertyInfo *)property->info)->flags & ANJUTA_PROJECT_PROPERTY_READ_WRITE)
				{
					ok = ok && amp_project_update_am_property (AMP_PROJECT (job->user_data), job->node, property);
				}
			}
		}
	}

	return ok;
}

/* Add job, completion phase: notify listeners */
static gboolean
amp_add_complete (PmJob *job)
{
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-changed", job->parent, job->error);

	return TRUE;
}

static PmCommandWork amp_add_before_job = {amp_add_before_setup, amp_add_work, amp_add_complete};
static PmCommandWork amp_add_after_job = {amp_add_after_setup, amp_add_work, amp_add_complete};

/* Remove job, setup phase: remember the parent (the node itself when it
 * has none) and mark the node as removed. */
static gboolean
amp_remove_setup (PmJob *job)
{
	AnjutaProjectNode *parent;

	parent = anjuta_project_node_parent (job->node);
	if (parent == NULL) parent = job->node;
	pm_job_set_parent (job, parent);
	anjuta_project_node_set_state (job->node, ANJUTA_PROJECT_REMOVED);

	return TRUE;
}

/* Remove job, work phase: erase the node from the build files */
static gboolean
amp_remove_work (PmJob *job)
{
	AmpNode *parent = AMP_NODE (job->parent);
	gboolean ok;

	ok = amp_node_erase (AMP_NODE (job->node), parent, AMP_PROJECT (job->user_data), &job->error);

	return ok;
}

/* Remove job, completion phase: notify listeners */
static gboolean
amp_remove_complete (PmJob *job)
{
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-changed", job->parent, job->error);

	return TRUE;
}

static PmCommandWork amp_remove_job = {amp_remove_setup, amp_remove_work, amp_remove_complete};

/* Set-property job, setup phase: nothing to prepare */
static gboolean
amp_set_property_setup (PmJob *job)
{
	return TRUE;
}

/* Set-property job, work phase: write the property into configure.ac or
 * the makefile depending on its flags. */
static gboolean
amp_set_property_work (PmJob *job)
{
	gint flags;

	flags = ((AmpPropertyInfo *)job->property->info)->flags;

	if (flags & AM_PROPERTY_IN_CONFIGURE)
	{
		amp_project_update_ac_property (AMP_PROJECT (job->user_data), job->property);
	}
	else if (flags & AM_PROPERTY_IN_MAKEFILE)
	{
		/* Only read-write properties are written back */
		if (((AnjutaProjectPropertyInfo *)job->property->info)->flags & ANJUTA_PROJECT_PROPERTY_READ_WRITE)
		{
			amp_project_update_am_property (AMP_PROJECT (job->user_data), job->node, job->property);
		}
	}

	return TRUE;
}

/* Set-property job, completion phase: notify listeners */
static gboolean
amp_set_property_complete (PmJob *job)
{
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-changed", job->node, job->error);

	return TRUE;
}

static PmCommandWork amp_set_property_job = {amp_set_property_setup, amp_set_property_work, amp_set_property_complete};

/* Remove-property job, setup phase: nothing to prepare */
static gboolean
amp_remove_property_setup (PmJob *job)
{
	return TRUE;
}

/* Remove-property job, work phase: rewrite configure.ac or the makefile
 * after the property has been cleared (same logic as set-property). */
static gboolean
amp_remove_property_work (PmJob *job)
{
	gint flags;

	flags = ((AmpPropertyInfo *)job->property->info)->flags;

	if (flags & AM_PROPERTY_IN_CONFIGURE)
	{
		amp_project_update_ac_property (AMP_PROJECT (job->user_data), job->property);
	}
	else if (flags &
AM_PROPERTY_IN_MAKEFILE)
	{
		/* Only read-write properties are written back */
		if (((AnjutaProjectPropertyInfo *)job->property->info)->flags & ANJUTA_PROJECT_PROPERTY_READ_WRITE)
		{
			amp_project_update_am_property (AMP_PROJECT (job->user_data), job->node, job->property);
		}
	}

	return TRUE;
}

/* Remove-property job, completion phase: notify listeners */
static gboolean
amp_remove_property_complete (PmJob *job)
{
	g_signal_emit_by_name (AMP_PROJECT (job->user_data), "node-changed", job->node, job->error);

	return TRUE;
}

static PmCommandWork amp_remove_property_job = {amp_remove_property_setup, amp_remove_property_work, amp_remove_property_complete};

/* Implement IAnjutaProject
 *---------------------------------------------------------------------------*/

/* Queue an asynchronous load of node (the whole project when node is NULL),
 * creating the command queue lazily and bumping the loading counter. */
static gboolean
iproject_load_node (IAnjutaProject *obj, AnjutaProjectNode *node, GError **error)
{
	PmJob *load_job;

	if (node == NULL) node = ANJUTA_PROJECT_NODE (obj);
	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	AMP_PROJECT (obj)->loading++;
	load_job = pm_job_new (&amp_load_job, node, NULL, NULL, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj);
	pm_command_queue_push (AMP_PROJECT (obj)->queue, load_job);

	return TRUE;
}

/* Queue an asynchronous save of node (the whole project when node is NULL) */
static gboolean
iproject_save_node (IAnjutaProject *obj, AnjutaProjectNode *node, GError **error)
{
	PmJob *save_job;

	if (node == NULL) node = ANJUTA_PROJECT_NODE (obj);
	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	save_job = pm_job_new (&amp_save_job, node, NULL, NULL, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj);
	pm_command_queue_push (AMP_PROJECT (obj)->queue, save_job);

	return TRUE;
}

/* Create a node and queue a job adding it before sibling under parent.
 * Returns the new node immediately; the build files are updated later by
 * the queued job. NULL when the node cannot be created. */
static AnjutaProjectNode *
iproject_add_node_before (IAnjutaProject *obj, AnjutaProjectNode *parent, AnjutaProjectNode *sibling, AnjutaProjectNodeType type, GFile *file, const gchar *name, GError **err)
{
	AnjutaProjectNode *node;
	PmJob *add_job;

	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	node = amp_node_new_valid (parent, type, file, name, err);
	if (node != NULL)
	{
		add_job = pm_job_new (&amp_add_before_job, node, parent, sibling, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj);
		pm_command_queue_push (AMP_PROJECT (obj)->queue, add_job);
	}

	return node;
}

/* Create a node and queue a job adding it after sibling under parent;
 * same contract as iproject_add_node_before. */
static AnjutaProjectNode *
iproject_add_node_after (IAnjutaProject *obj, AnjutaProjectNode *parent, AnjutaProjectNode *sibling, AnjutaProjectNodeType type, GFile *file, const gchar *name, GError **err)
{
	AnjutaProjectNode *node;
	PmJob *add_job;

	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	node = amp_node_new_valid (parent, type, file, name, err);
	if (node != NULL)
	{
		add_job = pm_job_new (&amp_add_after_job, node, parent, sibling, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj);
		pm_command_queue_push (AMP_PROJECT (obj)->queue, add_job);
	}

	return node;
}

/* Queue a job removing node from the project */
static gboolean
iproject_remove_node (IAnjutaProject *obj, AnjutaProjectNode *node, GError **err)
{
	PmJob *remove_job;

	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	remove_job = pm_job_new (&amp_remove_job, node, NULL, NULL, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj);
	pm_command_queue_push (AMP_PROJECT (obj)->queue, remove_job);

	return TRUE;
}

/* Set a property on node (mapped by name when name is not NULL) and queue
 * a job writing it into the build files. Returns the new property. */
static AnjutaProjectProperty *
iproject_set_property (IAnjutaProject *obj, AnjutaProjectNode *node, const gchar *id, const gchar *name, const gchar *value, GError **error)
{
	AnjutaProjectProperty *new_prop;
	PmJob *set_property_job;

	if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new ();

	new_prop = name == NULL ?
amp_node_property_set (node, id, value) : amp_node_map_property_set (node, id, name, value); set_property_job = pm_job_new (&amp_set_property_job, node, NULL, NULL, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj); set_property_job->property = new_prop; pm_command_queue_push (AMP_PROJECT (obj)->queue, set_property_job); return new_prop; } static gboolean iproject_remove_property (IAnjutaProject *obj, AnjutaProjectNode *node, const gchar *id, const gchar *name, GError **error) { AnjutaProjectProperty *new_prop; PmJob *remove_property_job; if (AMP_PROJECT (obj)->queue == NULL) AMP_PROJECT (obj)->queue = pm_command_queue_new (); new_prop = amp_node_map_property_set (node, id, name, NULL); remove_property_job = pm_job_new (&amp_set_property_job, node, NULL, NULL, ANJUTA_PROJECT_UNKNOWN, NULL, NULL, obj); remove_property_job->property = new_prop; pm_command_queue_push (AMP_PROJECT (obj)->queue, remove_property_job); return TRUE; } static AnjutaProjectNode * iproject_get_root (IAnjutaProject *obj, GError **err) { return ANJUTA_PROJECT_NODE (obj); } static const GList* iproject_get_node_info (IAnjutaProject *obj, GError **err) { return amp_project_get_node_info (AMP_PROJECT (obj), err); } static gboolean iproject_is_loaded (IAnjutaProject *obj, GError **err) { return amp_project_is_loaded (AMP_PROJECT (obj)); } static void iproject_iface_init(IAnjutaProjectIface* iface) { iface->load_node = iproject_load_node; iface->save_node = iproject_save_node; iface->add_node_before = iproject_add_node_before; iface->add_node_after = iproject_add_node_after; iface->remove_node = iproject_remove_node; iface->set_property = iproject_set_property; iface->remove_property = iproject_remove_property; iface->get_root = iproject_get_root; iface->get_node_info = iproject_get_node_info; iface->is_loaded = iproject_is_loaded; } /* AmpNode implementation *---------------------------------------------------------------------------*/ static gboolean amp_project_load (AmpNode *root, AmpNode *parent, 
AmpProject *project, GError **error)
{
	return amp_project_load_root (AMP_PROJECT (root), error);
}

/* Save the configure file when dirty, let the parent class save the node
 * itself, then save every child recursively. Stops at the first failure. */
static gboolean
amp_project_save (AmpNode *root, AmpNode *parent, AmpProject *project, GError **error)
{
	AnjutaTokenFile *tfile;
	AnjutaProjectNode *child;

	/* Save node */
	tfile = AMP_PROJECT (root)->configure_file;
	if (anjuta_token_file_is_dirty (tfile))
	{
		if (!anjuta_token_file_save (tfile, error)) return FALSE;
	}

	if (!AMP_NODE_CLASS (parent_class)->save (root, parent, project, error))
	{
		return FALSE;
	}

	/* Save all children */
	for (child = anjuta_project_node_first_child (ANJUTA_PROJECT_NODE (root)); child != NULL; child = anjuta_project_node_next_sibling (child))
	{
		if (!amp_node_save (AMP_NODE (child), root, project, error)) return FALSE;
	}

	return TRUE;
}

/* Copy the new root data onto the existing project node */
static gboolean
amp_project_update (AmpNode *node, AmpNode *new_node)
{
	amp_project_update_root (AMP_PROJECT (node), AMP_PROJECT (new_node));

	return TRUE;
}

/* Copy a project node, additionally taking a reference on the language
 * manager so both copies own it. */
static AmpNode *
amp_project_copy (AmpNode *old_node)
{
	AmpNode *new_node;

	new_node = AMP_NODE_CLASS (amp_project_parent_class)->copy (old_node);
	((AmpProject *)new_node)->lang_manager = (((AmpProject *)old_node)->lang_manager != NULL) ? g_object_ref (((AmpProject *)old_node)->lang_manager) : NULL;

	return new_node;
}

/* GObject implementation
 *---------------------------------------------------------------------------*/

/* Dispose: unload the project and release the hash tables, command queue,
 * monitor and language manager; released members are reset to NULL. */
static void
amp_project_dispose (GObject *object)
{
	AmpProject *project;

	g_return_if_fail (AMP_IS_PROJECT (object));

	project = AMP_PROJECT (object);
	amp_project_unload (project);
	amp_project_clear (project);

	if (project->groups) g_hash_table_destroy (project->groups);
	project->groups = NULL;
	if (project->configs) g_hash_table_destroy (project->configs);
	project->configs = NULL;
	if (project->ac_variables) g_hash_table_destroy (project->ac_variables);
	project->ac_variables = NULL;

	if (project->queue) pm_command_queue_free (project->queue);
	project->queue = NULL;

	if (project->monitor) g_object_unref (project->monitor);
	project->monitor = NULL;

	if (project->lang_manager) g_object_unref (project->lang_manager);
	project->lang_manager = NULL;

	G_OBJECT_CLASS (parent_class)->dispose (object);
}

/* Instance init: create the shortcut hash tables and zero everything else.
 * groups: URI string -> group node (table owns the key string);
 * configs: GFile -> AmpConfigFile (table owns the value);
 * ac_variables: name string -> AnjutaToken (table owns the key string). */
static void
amp_project_init (AmpProject *project)
{
	g_return_if_fail (project != NULL);
	g_return_if_fail (AMP_IS_PROJECT (project));

	/* project data */
	project->configure_file = NULL;
	project->configure_token = NULL;

	/* Hash tables */
	project->groups = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
	project->files = NULL;
	project->configs = g_hash_table_new_full (g_file_hash, (GEqualFunc)g_file_equal, NULL, (GDestroyNotify)amp_config_file_free);
	project->ac_variables = g_hash_table_new_full (g_str_hash, g_str_equal, (GDestroyNotify)g_free, NULL);

	/* Default style */
	project->am_space_list = NULL;
	project->ac_space_list = NULL;
	project->arg_list = NULL;

	project->queue = NULL;

	project->loading = 0;
}

/* Class init: hook up dispose and the AmpNode virtual methods */
static void
amp_project_class_init (AmpProjectClass *klass)
{
	GObjectClass *object_class;
	AmpNodeClass *node_class;

	parent_class = g_type_class_peek_parent (klass);

	object_class = G_OBJECT_CLASS (klass);
	object_class->dispose = amp_project_dispose;

	node_class = AMP_NODE_CLASS (klass);
	node_class->load = amp_project_load;
	node_class->save = amp_project_save;
	node_class->update = amp_project_update;
	node_class->copy = amp_project_copy;
}

static void
amp_project_class_finalize (AmpProjectClass *klass)
{
}

/* Register the node and project types with the plugin's GTypeModule */
void
amp_project_register (GTypeModule *module)
{
	amp_node_register (module);
	amp_project_register_type (module);
}
gpl-2.0
KroArtem/Wyrmgus
src/video/video.cpp
3
13653
// _________ __ __ // / _____// |_____________ _/ |______ ____ __ __ ______ // \_____ \\ __\_ __ \__ \\ __\__ \ / ___\| | \/ ___/ // / \| | | | \// __ \| | / __ \_/ /_/ > | /\___ | // /_______ /|__| |__| (____ /__| (____ /\___ /|____//____ > // \/ \/ \//_____/ \/ // ______________________ ______________________ // T H E W A R B E G I N S // Stratagus - A free fantasy real time strategy game engine // /**@name video.cpp - The universal video functions. */ // // (c) Copyright 1999-2005 by Lutz Sammer, Nehal Mistry, and Jimmy Salmon // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; only version 2 of the License. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA // 02111-1307, USA. // //@{ /** ** @page VideoModule Module - Video ** ** There are lots of video functions available, therefore this ** page tries to summarize these separately. ** ** @note care must be taken what to use, how to use it and where ** put new source-code. So please read the following sections ** first. 
** ** ** @section VideoMain Video main initialization ** ** The general setup of platform dependent video and basic video ** functionalities is done with function @see InitVideo ** ** We support (depending on the platform) resolutions: ** 640x480, 800x600, 1024x768, 1600x1200 ** with colors 8,15,16,24,32 bit ** ** @see video.h @see video.cpp ** ** ** @section VideoModuleHigh High Level - video dependent functions ** ** These are the video platforms that are supported, any platform ** dependent settings/functionailty are located within each ** separate files: ** ** SDL : Simple Direct Media for Linux, ** Win32 (Windows 95/98/2000), BeOs, MacOS ** (visit http://www.libsdl.org) ** ** @see sdl.cpp ** ** ** @section VideoModuleLow Low Level - draw functions ** ** All direct drawing functions ** ** @note you might need to use Decorations (see above), to prevent ** drawing directly to screen in conflict with the video update. ** ** @see linedraw.cpp ** @see sprite.cpp */ /*---------------------------------------------------------------------------- -- Includes ----------------------------------------------------------------------------*/ #include "stratagus.h" #include <vector> #include "video.h" #include "intern_video.h" #include "cursor.h" #include "font.h" #include "iolib.h" #include "map.h" #include "ui.h" #include "SDL.h" /*---------------------------------------------------------------------------- -- Declarations ----------------------------------------------------------------------------*/ /** ** Structure of pushed clippings. 
*/ struct Clip { int X1; /// pushed clipping top left int Y1; /// pushed clipping top left int X2; /// pushed clipping bottom right int Y2; /// pushed clipping bottom right }; class ColorIndexRange { public: ColorIndexRange(unsigned int begin, unsigned int end) : begin(begin), end(end) {} public: unsigned int begin; unsigned int end; }; class CColorCycling { private: CColorCycling() : ColorCycleAll(false), cycleCount(0) {} static void CreateInstanceIfNeeded() { if (s_instance == NULL) { s_instance = new CColorCycling; } } public: static CColorCycling &GetInstance() { CreateInstanceIfNeeded(); return *s_instance; } static void ReleaseInstance() { delete s_instance; s_instance = NULL; } public: std::vector<SDL_Surface *> PaletteList; /// List of all used palettes. std::vector<ColorIndexRange> ColorIndexRanges; /// List of range of color index for cycling. bool ColorCycleAll; /// Flag Color Cycle with all palettes unsigned int cycleCount; private: static CColorCycling *s_instance; }; /*---------------------------------------------------------------------------- -- Externals ----------------------------------------------------------------------------*/ extern void InitVideoSdl(); /// Init SDL video hardware driver extern void SdlLockScreen(); /// Do SDL hardware lock extern void SdlUnlockScreen(); /// Do SDL hardware unlock /*---------------------------------------------------------------------------- -- Variables ----------------------------------------------------------------------------*/ CVideo Video; /*static*/ CColorCycling *CColorCycling::s_instance = NULL; #if defined(USE_OPENGL) || defined(USE_GLES) char ForceUseOpenGL; bool UseOpenGL; /// Use OpenGL //Wyrmgus start //bool ZoomNoResize; bool ZoomNoResize = false; //bool GLShaderPipelineSupported = true; bool GLShaderPipelineSupported = false; //Wyrmgus end #endif char VideoForceFullScreen; /// fullscreen set from commandline double NextFrameTicks; /// Ticks of begin of the next frame unsigned long 
FrameCounter; /// Current frame number unsigned long SlowFrameCounter; /// Profile, frames out of sync int ClipX1; /// current clipping top left int ClipY1; /// current clipping top left int ClipX2; /// current clipping bottom right int ClipY2; /// current clipping bottom right static std::vector<Clip> Clips; int VideoSyncSpeed = 100; /// 0 disable interrupts int SkipFrames; /// Skip this frames Uint32 ColorBlack; Uint32 ColorDarkGreen; Uint32 ColorLightBlue; Uint32 ColorBlue; Uint32 ColorOrange; Uint32 ColorWhite; Uint32 ColorLightGray; Uint32 ColorGray; Uint32 ColorDarkGray; Uint32 ColorRed; Uint32 ColorGreen; Uint32 ColorYellow; /*---------------------------------------------------------------------------- -- Functions ----------------------------------------------------------------------------*/ /** ** Set clipping for graphic routines. ** ** @param left Left X screen coordinate. ** @param top Top Y screen coordinate. ** @param right Right X screen coordinate. ** @param bottom Bottom Y screen coordinate. */ void SetClipping(int left, int top, int right, int bottom) { Assert(left <= right && top <= bottom && left >= 0 && left < Video.Width && top >= 0 && top < Video.Height && right >= 0 && right < Video.Width && bottom >= 0 && bottom < Video.Height); ClipX1 = left; ClipY1 = top; ClipX2 = right; ClipY2 = bottom; } /** ** Push current clipping. */ void PushClipping() { Clip clip = {ClipX1, ClipY1, ClipX2, ClipY2}; Clips.push_back(clip); } /** ** Pop current clipping. */ void PopClipping() { Clip clip = Clips.back(); ClipX1 = clip.X1; ClipY1 = clip.Y1; ClipX2 = clip.X2; ClipY2 = clip.Y2; Clips.pop_back(); } /*---------------------------------------------------------------------------- -- Functions ----------------------------------------------------------------------------*/ /** ** Lock the screen for write access. */ void CVideo::LockScreen() { SdlLockScreen(); } /** ** Unlock the screen for write access. 
*/ void CVideo::UnlockScreen() { SdlUnlockScreen(); } /** ** Clear the video screen. */ void CVideo::ClearScreen() { FillRectangle(ColorBlack, 0, 0, Video.Width, Video.Height); } /** ** Resize the video screen. ** ** @return True if the resolution changed, false otherwise */ bool CVideo::ResizeScreen(int w, int h) { if (VideoValidResolution(w, h)) { #if defined(USE_OPENGL) || defined(USE_GLES) if (UseOpenGL) { FreeOpenGLGraphics(); FreeOpenGLFonts(); UI.Minimap.FreeOpenGL(); } #endif TheScreen = SDL_SetVideoMode(w, h, TheScreen->format->BitsPerPixel, TheScreen->flags); #if defined(USE_OPENGL) || defined(USE_GLES) ViewportWidth = w; ViewportHeight = h; if (ZoomNoResize) { ReloadOpenGL(); } else { Width = w; Height = h; SetClipping(0, 0, Video.Width - 1, Video.Height - 1); if (UseOpenGL) { ReloadOpenGL(); } } #else Width = w; Height = h; SetClipping(0, 0, Video.Width - 1, Video.Height - 1); #endif //Wyrmgus start if (GameRunning) { InitUserInterface(); UI.Load(); } //Wyrmgus end return true; } return false; } /** ** Return ticks in ms since start. */ unsigned long GetTicks() { return SDL_GetTicks(); } /** ** Video initialize. */ void InitVideo() { InitVideoSdl(); InitLineDraw(); } void DeInitVideo() { CColorCycling::ReleaseInstance(); } /** ** Set the video sync speed ** ** @param l Lua state. */ static int CclSetVideoSyncSpeed(lua_State *l) { LuaCheckArgs(l, 1); VideoSyncSpeed = LuaToNumber(l, 1); return 0; } void VideoCclRegister() { lua_register(Lua, "SetVideoSyncSpeed", CclSetVideoSyncSpeed); } #if 1 // color cycling /** ** Add a surface to the palette list, used for color cycling ** ** @param surface The SDL surface to add to the list to cycle. 
*/ void VideoPaletteListAdd(SDL_Surface *surface) { if (surface == NULL || surface->format == NULL || surface->format->BytesPerPixel != 1) { return; } CColorCycling &colorCycling = CColorCycling::GetInstance(); std::vector<SDL_Surface *>::iterator it = std::find(colorCycling.PaletteList.begin(), colorCycling.PaletteList.end(), surface); if (it != colorCycling.PaletteList.end()) { return ; } colorCycling.PaletteList.push_back(surface); } /** ** Remove a surface to the palette list, used for color cycling ** ** @param surface The SDL surface to add to the list to cycle. */ void VideoPaletteListRemove(SDL_Surface *surface) { CColorCycling &colorCycling = CColorCycling::GetInstance(); std::vector<SDL_Surface *>::iterator it = std::find(colorCycling.PaletteList.begin(), colorCycling.PaletteList.end(), surface); if (it != colorCycling.PaletteList.end()) { colorCycling.PaletteList.erase(it); } } void ClearAllColorCyclingRange() { CColorCycling::GetInstance().ColorIndexRanges.clear(); } void AddColorCyclingRange(unsigned int begin, unsigned int end) { CColorCycling::GetInstance().ColorIndexRanges.push_back(ColorIndexRange(begin, end)); } void SetColorCycleAll(bool value) { CColorCycling::GetInstance().ColorCycleAll = value; } /** ** Color Cycle for particular surface */ static void ColorCycleSurface(SDL_Surface &surface) { SDL_Color *palcolors = surface.format->palette->colors; SDL_Color colors[256]; CColorCycling &colorCycling = CColorCycling::GetInstance(); memcpy(colors, palcolors, sizeof(colors)); for (std::vector<ColorIndexRange>::const_iterator it = colorCycling.ColorIndexRanges.begin(); it != colorCycling.ColorIndexRanges.end(); ++it) { const ColorIndexRange &range = *it; memcpy(colors + range.begin, palcolors + range.begin + 1, (range.end - range.begin) * sizeof(SDL_Color)); colors[range.end] = palcolors[range.begin]; } SDL_SetPalette(&surface, SDL_LOGPAL | SDL_PHYSPAL, colors, 0, 256); } /** ** Undo Color Cycle for particular surface ** @note function may be 
optimized. */ static void ColorCycleSurface_Reverse(SDL_Surface &surface, unsigned int count) { for (unsigned int i = 0; i != count; ++i) { SDL_Color *palcolors = surface.format->palette->colors; SDL_Color colors[256]; CColorCycling &colorCycling = CColorCycling::GetInstance(); memcpy(colors, palcolors, sizeof(colors)); for (std::vector<ColorIndexRange>::const_iterator it = colorCycling.ColorIndexRanges.begin(); it != colorCycling.ColorIndexRanges.end(); ++it) { const ColorIndexRange &range = *it; memcpy(colors + range.begin + 1, palcolors + range.begin, (range.end - range.begin) * sizeof(SDL_Color)); colors[range.begin] = palcolors[range.end]; } SDL_SetPalette(&surface, SDL_LOGPAL | SDL_PHYSPAL, colors, 0, 256); } } /** ** Color cycle. */ // FIXME: cpu intensive to go through the whole PaletteList void ColorCycle() { /// MACRO defines speed of colorcycling FIXME: should be made configurable #define COLOR_CYCLE_SPEED (CYCLES_PER_SECOND / 4) if ((FrameCounter % COLOR_CYCLE_SPEED) != 0) { return; } CColorCycling &colorCycling = CColorCycling::GetInstance(); if (colorCycling.ColorCycleAll) { ++colorCycling.cycleCount; for (std::vector<SDL_Surface *>::iterator it = colorCycling.PaletteList.begin(); it != colorCycling.PaletteList.end(); ++it) { SDL_Surface *surface = (*it); ColorCycleSurface(*surface); } } else if (Map.TileGraphic->Surface->format->BytesPerPixel == 1) { ++colorCycling.cycleCount; ColorCycleSurface(*Map.TileGraphic->Surface); } } void RestoreColorCyclingSurface() { CColorCycling &colorCycling = CColorCycling::GetInstance(); if (colorCycling.ColorCycleAll) { for (std::vector<SDL_Surface *>::iterator it = colorCycling.PaletteList.begin(); it != colorCycling.PaletteList.end(); ++it) { SDL_Surface *surface = (*it); ColorCycleSurface_Reverse(*surface, colorCycling.cycleCount); } } else if (Map.TileGraphic->Surface->format->BytesPerPixel == 1) { ColorCycleSurface_Reverse(*Map.TileGraphic->Surface, colorCycling.cycleCount); } colorCycling.cycleCount = 0; } 
#endif //@}
gpl-2.0
lkundrak/coreboot
src/mainboard/supermicro/h8scm_fam10/acpi_tables.c
3
6960
/* * This file is part of the coreboot project. * * Copyright (C) 2010 Advanced Micro Devices, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <console/console.h> #include <string.h> #include <arch/acpi.h> #include <arch/ioapic.h> #include <device/pci.h> #include <device/pci_ids.h> #include <cpu/x86/msr.h> #include <cpu/amd/mtrr.h> #include <cpu/amd/amdfam10_sysconf.h> #include "mb_sysconf.h" extern const unsigned char AmlCode[]; extern const unsigned char AmlCode_ssdt[]; #if CONFIG_ACPI_SSDTX_NUM >= 1 extern const unsigned char AmlCode_ssdt2[]; extern const unsigned char AmlCode_ssdt3[]; extern const unsigned char AmlCode_ssdt4[]; extern const unsigned char AmlCode_ssdt5[]; #endif unsigned long acpi_fill_mcfg(unsigned long current) { /* Just a dummy */ return current; } unsigned long acpi_fill_madt(unsigned long current) { device_t dev; u32 dword; u32 gsi_base=0; /* create all subtables for processors */ current = acpi_create_madt_lapics(current); /* Write SB700 IOAPIC, only one */ current += acpi_create_madt_ioapic((acpi_madt_ioapic_t *) current, 2, IO_APIC_ADDR, gsi_base); /* IOAPIC on rs5690 */ gsi_base += 24; /* SB700 has 24 IOAPIC entries. 
*/ dev = dev_find_slot(0, PCI_DEVFN(0, 0)); if (dev) { pci_write_config32(dev, 0xF8, 0x1); dword = pci_read_config32(dev, 0xFC) & 0xfffffff0; current += acpi_create_madt_ioapic((acpi_madt_ioapic_t *) current, 2+1, dword, gsi_base); } current += acpi_create_madt_irqoverride((acpi_madt_irqoverride_t *) current, 0, 0, 2, 0); current += acpi_create_madt_irqoverride((acpi_madt_irqoverride_t *) current, 0, 9, 9, 0xF); /* 0: mean bus 0--->ISA */ /* 0: PIC 0 */ /* 2: APIC 2 */ /* 5 mean: 0101 --> Edge-triggered, Active high */ /* create all subtables for processors */ /* current = acpi_create_madt_lapic_nmis(current, 5, 1); */ /* 1: LINT1 connect to NMI */ return current; } unsigned long write_acpi_tables(unsigned long start) { unsigned long current; acpi_rsdp_t *rsdp; acpi_rsdt_t *rsdt; acpi_hpet_t *hpet; acpi_madt_t *madt; acpi_srat_t *srat; acpi_slit_t *slit; acpi_fadt_t *fadt; acpi_facs_t *facs; acpi_header_t *dsdt; acpi_header_t *ssdt; #if CONFIG_ACPI_SSDTX_NUM >= 1 acpi_header_t *ssdtx; void *p; int i; #endif get_bus_conf(); /* it will get sblk, pci1234, hcdn, and sbdn */ /* Align ACPI tables to 16 bytes */ start = ALIGN(start, 16); current = start; printk(BIOS_INFO, "ACPI: Writing ACPI tables at %lx...\n", start); /* We need at least an RSDP and an RSDT Table */ rsdp = (acpi_rsdp_t *) current; current += sizeof(acpi_rsdp_t); rsdt = (acpi_rsdt_t *) current; current += sizeof(acpi_rsdt_t); /* clear all table memory */ memset((void *)start, 0, current - start); acpi_write_rsdp(rsdp, rsdt, NULL); acpi_write_rsdt(rsdt); /* * We explicitly add these tables later on: */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * HPET at %lx\n", current); hpet = (acpi_hpet_t *) current; current += sizeof(acpi_hpet_t); acpi_create_hpet(hpet); acpi_add_table(rsdp, hpet); /* If we want to use HPET Timers Linux wants an MADT */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * MADT at %lx\n",current); madt = (acpi_madt_t *) current; acpi_create_madt(madt); current += 
madt->header.length; acpi_add_table(rsdp, madt); /* SRAT */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * SRAT at %lx\n", current); srat = (acpi_srat_t *) current; acpi_create_srat(srat); current += srat->header.length; acpi_add_table(rsdp, srat); /* SLIT */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * SLIT at %lx\n", current); slit = (acpi_slit_t *) current; acpi_create_slit(slit); current += slit->header.length; acpi_add_table(rsdp, slit); /* SSDT */ current = ALIGN(current, 16); printk(BIOS_DEBUG, "ACPI: * SSDT at %lx\n", current); ssdt = (acpi_header_t *)current; memcpy(ssdt, &AmlCode_ssdt, sizeof(acpi_header_t)); current += ssdt->length; memcpy(ssdt, &AmlCode_ssdt, ssdt->length); //Here you need to set value in pci1234, sblk and sbdn in get_bus_conf.c update_ssdt((void*)ssdt); /* recalculate checksum */ ssdt->checksum = 0; ssdt->checksum = acpi_checksum((unsigned char *)ssdt,ssdt->length); acpi_add_table(rsdp,ssdt); printk(BIOS_DEBUG, "ACPI: * SSDT for PState at %lx\n", current); current = acpi_add_ssdt_pstates(rsdp, current); #if CONFIG_ACPI_SSDTX_NUM >= 1 /* same htio, but different position? 
We may have to copy, change HCIN, and recalculate the checknum and add_table */ for(i=1;i<sysconf.hc_possible_num;i++) { // 0: is hc sblink if((sysconf.pci1234[i] & 1) != 1 ) continue; u8 c; if (i < 7) { c = (u8) ('4' + i - 1); } else { c = (u8) ('A' + i - 1 - 6); } current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * SSDT for PCI%c at %lx\n", c, current); //pci0 and pci1 are in dsdt ssdtx = (acpi_header_t *)current; switch (sysconf.hcid[i]) { case 1: p = &AmlCode_ssdt2; break; case 2: p = &AmlCode_ssdt3; break; case 3: /* 8131 */ p = &AmlCode_ssdt4; break; default: /* HTX no io apic */ p = &AmlCode_ssdt5; break; } memcpy(ssdtx, p, sizeof(acpi_header_t)); current += ssdtx->length; memcpy(ssdtx, p, ssdtx->length); update_ssdtx((void *)ssdtx, i); ssdtx->checksum = 0; ssdtx->checksum = acpi_checksum((u8 *)ssdtx, ssdtx->length); acpi_add_table(rsdp, ssdtx); } #endif /* DSDT */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * DSDT at %lx\n", current); dsdt = (acpi_header_t *)current; // it will used by fadt memcpy(dsdt, &AmlCode, sizeof(acpi_header_t)); current += dsdt->length; memcpy(dsdt, &AmlCode, dsdt->length); printk(BIOS_DEBUG, "ACPI: * DSDT @ %p Length %x\n",dsdt,dsdt->length); /* FACS */ // it needs 64 bit alignment current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * FACS at %lx\n", current); facs = (acpi_facs_t *) current; // it will be used by fadt current += sizeof(acpi_facs_t); acpi_create_facs(facs); /* FADT */ current = ALIGN(current, 8); printk(BIOS_DEBUG, "ACPI: * FADT at %lx\n", current); fadt = (acpi_fadt_t *) current; current += sizeof(acpi_fadt_t); acpi_create_fadt(fadt, facs, dsdt); acpi_add_table(rsdp, fadt); printk(BIOS_INFO, "ACPI: done.\n"); return current; }
gpl-2.0
pmembrey/wireshark
ui/qt/packet_list_model.cpp
3
20938
/* packet_list_model.cpp * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <algorithm> #include "packet_list_model.h" #include "file.h" #include <wsutil/nstime.h> #include <epan/column.h> #include <epan/prefs.h> #include "ui/packet_list_utils.h" #include "ui/recent.h" #include "color.h" #include "color_filters.h" #include "frame_tvbuff.h" #include "color_utils.h" #include "wireshark_application.h" #include <QColor> #include <QElapsedTimer> #include <QFontMetrics> #include <QModelIndex> #include <QElapsedTimer> PacketListModel::PacketListModel(QObject *parent, capture_file *cf) : QAbstractItemModel(parent), max_row_height_(0), max_line_count_(1), idle_dissection_row_(0) { setCaptureFile(cf); PacketListRecord::clearStringPool(); connect(this, SIGNAL(maxLineCountChanged(QModelIndex)), this, SLOT(emitItemHeightChanged(QModelIndex)), Qt::QueuedConnection); idle_dissection_timer_ = new QElapsedTimer(); } PacketListModel::~PacketListModel() { delete idle_dissection_timer_; } void PacketListModel::setCaptureFile(capture_file *cf) { cap_file_ = cf; resetColumns(); } // Packet list records have no children (for now, at least). 
QModelIndex PacketListModel::index(int row, int column, const QModelIndex &) const { if (row >= visible_rows_.count() || row < 0 || !cap_file_ || column >= prefs.num_cols) return QModelIndex(); PacketListRecord *record = visible_rows_[row]; return createIndex(row, column, record); } // Everything is under the root. QModelIndex PacketListModel::parent(const QModelIndex &) const { return QModelIndex(); } int PacketListModel::packetNumberToRow(int packet_num) const { return number_to_row_.value(packet_num, -1); } guint PacketListModel::recreateVisibleRows() { int pos = visible_rows_.count(); beginResetModel(); visible_rows_.clear(); number_to_row_.clear(); endResetModel(); beginInsertRows(QModelIndex(), pos, pos); foreach (PacketListRecord *record, physical_rows_) { if (record->frameData()->flags.passed_dfilter || record->frameData()->flags.ref_time) { visible_rows_ << record; number_to_row_[record->frameData()->num] = visible_rows_.count() - 1; } } endInsertRows(); idle_dissection_row_ = 0; return visible_rows_.count(); } void PacketListModel::clear() { beginResetModel(); qDeleteAll(physical_rows_); physical_rows_.clear(); visible_rows_.clear(); number_to_row_.clear(); PacketListRecord::clearStringPool(); endResetModel(); max_row_height_ = 0; max_line_count_ = 1; idle_dissection_row_ = 0; } void PacketListModel::resetColumns() { if (cap_file_) { PacketListRecord::resetColumns(&cap_file_->cinfo); } dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); headerDataChanged(Qt::Horizontal, 0, columnCount() - 1); } void PacketListModel::resetColorized() { foreach (PacketListRecord *record, physical_rows_) { record->resetColorized(); } dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::toggleFrameMark(const QModelIndex &fm_index) { if (!cap_file_ || !fm_index.isValid()) return; PacketListRecord *record = static_cast<PacketListRecord*>(fm_index.internalPointer()); if (!record) return; frame_data *fdata = 
record->frameData(); if (!fdata) return; if (fdata->flags.marked) cf_unmark_frame(cap_file_, fdata); else cf_mark_frame(cap_file_, fdata); dataChanged(fm_index, fm_index); } void PacketListModel::setDisplayedFrameMark(gboolean set) { foreach (PacketListRecord *record, visible_rows_) { if (set) { cf_mark_frame(cap_file_, record->frameData()); } else { cf_unmark_frame(cap_file_, record->frameData()); } } dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::toggleFrameIgnore(const QModelIndex &i_index) { if (!cap_file_ || !i_index.isValid()) return; PacketListRecord *record = static_cast<PacketListRecord*>(i_index.internalPointer()); if (!record) return; frame_data *fdata = record->frameData(); if (!fdata) return; if (fdata->flags.ignored) cf_unignore_frame(cap_file_, fdata); else cf_ignore_frame(cap_file_, fdata); } void PacketListModel::setDisplayedFrameIgnore(gboolean set) { foreach (PacketListRecord *record, visible_rows_) { if (set) { cf_ignore_frame(cap_file_, record->frameData()); } else { cf_unignore_frame(cap_file_, record->frameData()); } } dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::toggleFrameRefTime(const QModelIndex &rt_index) { if (!cap_file_ || !rt_index.isValid()) return; PacketListRecord *record = static_cast<PacketListRecord*>(rt_index.internalPointer()); if (!record) return; frame_data *fdata = record->frameData(); if (!fdata) return; if (fdata->flags.ref_time) { fdata->flags.ref_time=0; cap_file_->ref_time_count--; } else { fdata->flags.ref_time=1; cap_file_->ref_time_count++; } cf_reftime_packets(cap_file_); if (!fdata->flags.ref_time && !fdata->flags.passed_dfilter) { cap_file_->displayed_count--; } record->resetColumns(&cap_file_->cinfo); dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::unsetAllFrameRefTime() { if (!cap_file_) return; /* XXX: we might need a progressbar here */ foreach (PacketListRecord *record, 
physical_rows_) { frame_data *fdata = record->frameData(); if (fdata->flags.ref_time) { fdata->flags.ref_time = 0; } } cap_file_->ref_time_count = 0; cf_reftime_packets(cap_file_); PacketListRecord::resetColumns(&cap_file_->cinfo); dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::applyTimeShift() { resetColumns(); dataChanged(index(0, 0), index(rowCount() - 1, columnCount() - 1)); } void PacketListModel::setMaximiumRowHeight(int height) { max_row_height_ = height; // As the QTreeView uniformRowHeights documentation says, // "The height is obtained from the first item in the view. It is // updated when the data changes on that item." dataChanged(index(0, 0), index(0, columnCount() - 1)); } //void PacketListModel::setMonospaceFont(const QFont &mono_font, int row_height) //{ // QFontMetrics fm(mono_font_); // mono_font_ = mono_font; // row_height_ = row_height; // line_spacing_ = fm.lineSpacing(); //} // The Qt MVC documentation suggests using QSortFilterProxyModel for sorting // and filtering. That seems like overkill but it might be something we want // to do in the future. 
int PacketListModel::sort_column_; int PacketListModel::text_sort_column_; Qt::SortOrder PacketListModel::sort_order_; capture_file *PacketListModel::sort_cap_file_; QElapsedTimer busy_timer_; const int busy_timeout_ = 65; // ms, approximately 15 fps void PacketListModel::sort(int column, Qt::SortOrder order) { // packet_list_store.c:packet_list_dissect_and_cache_all if (!cap_file_ || visible_rows_.count() < 1) return; if (column < 0) return; sort_column_ = column; text_sort_column_ = PacketListRecord::textColumn(column); sort_order_ = order; sort_cap_file_ = cap_file_; gboolean stop_flag = FALSE; QString col_title = get_column_title(column); busy_timer_.start(); emit pushProgressStatus(tr("Dissecting"), true, true, &stop_flag); int row_num = 0; foreach (PacketListRecord *row, physical_rows_) { row->columnString(sort_cap_file_, column); row_num++; if (busy_timer_.elapsed() > busy_timeout_) { if (stop_flag) { emit popProgressStatus(); return; } emit updateProgressStatus(row_num * 100 / physical_rows_.count()); // What's the least amount of processing that we can do which will draw // the progress indicator? wsApp->processEvents(QEventLoop::AllEvents, 1); busy_timer_.restart(); } } emit popProgressStatus(); // XXX Use updateProgress instead. We'd have to switch from std::sort to // something we can interrupt. 
if (!col_title.isEmpty()) { QString busy_msg = tr("Sorting \"%1\"").arg(col_title); emit pushBusyStatus(busy_msg); } busy_timer_.restart(); std::sort(physical_rows_.begin(), physical_rows_.end(), recordLessThan); beginResetModel(); visible_rows_.clear(); number_to_row_.clear(); foreach (PacketListRecord *record, physical_rows_) { if (record->frameData()->flags.passed_dfilter || record->frameData()->flags.ref_time) { visible_rows_ << record; number_to_row_[record->frameData()->num] = visible_rows_.count() - 1; } } endResetModel(); if (!col_title.isEmpty()) { emit popBusyStatus(); } if (cap_file_->current_frame) { emit goToPacket(cap_file_->current_frame->num); } } bool PacketListModel::recordLessThan(PacketListRecord *r1, PacketListRecord *r2) { int cmp_val = 0; // Wherein we try to cram the logic of packet_list_compare_records, // _packet_list_compare_records, and packet_list_compare_custom from // gtk/packet_list_store.c into one function if (busy_timer_.elapsed() > busy_timeout_) { // What's the least amount of processing that we can do which will draw // the busy indicator? wsApp->processEvents(QEventLoop::ExcludeUserInputEvents | QEventLoop::ExcludeSocketNotifiers, 1); busy_timer_.restart(); } if (sort_column_ < 0) { // No column. 
cmp_val = frame_data_compare(sort_cap_file_->epan, r1->frameData(), r2->frameData(), COL_NUMBER); } else if (text_sort_column_ < 0) { // Column comes directly from frame data cmp_val = frame_data_compare(sort_cap_file_->epan, r1->frameData(), r2->frameData(), sort_cap_file_->cinfo.columns[sort_column_].col_fmt); } else { if (r1->columnString(sort_cap_file_, sort_column_).constData() == r2->columnString(sort_cap_file_, sort_column_).constData()) { cmp_val = 0; } else if (sort_cap_file_->cinfo.columns[sort_column_].col_fmt == COL_CUSTOM) { header_field_info *hfi; // Column comes from custom data hfi = proto_registrar_get_byname(sort_cap_file_->cinfo.columns[sort_column_].col_custom_field); if (hfi == NULL) { cmp_val = frame_data_compare(sort_cap_file_->epan, r1->frameData(), r2->frameData(), COL_NUMBER); } else if ((hfi->strings == NULL) && (((IS_FT_INT(hfi->type) || IS_FT_UINT(hfi->type)) && ((hfi->display == BASE_DEC) || (hfi->display == BASE_DEC_HEX) || (hfi->display == BASE_OCT))) || (hfi->type == FT_DOUBLE) || (hfi->type == FT_FLOAT) || (hfi->type == FT_BOOLEAN) || (hfi->type == FT_FRAMENUM) || (hfi->type == FT_RELATIVE_TIME))) { // Attempt to convert to numbers. // XXX This is slow. Can we avoid doing this? bool ok_r1, ok_r2; double num_r1 = r1->columnString(sort_cap_file_, sort_column_).toDouble(&ok_r1); double num_r2 = r2->columnString(sort_cap_file_, sort_column_).toDouble(&ok_r2); if (!ok_r1 && !ok_r2) { cmp_val = 0; } else if (!ok_r1 || num_r1 < num_r2) { cmp_val = -1; } else if (!ok_r2 || num_r1 > num_r2) { cmp_val = 1; } } else { cmp_val = strcmp(r1->columnString(sort_cap_file_, sort_column_).constData(), r2->columnString(sort_cap_file_, sort_column_).constData()); } } else { cmp_val = strcmp(r1->columnString(sort_cap_file_, sort_column_).constData(), r2->columnString(sort_cap_file_, sort_column_).constData()); } if (cmp_val == 0) { // All else being equal, compare column numbers. 
cmp_val = frame_data_compare(sort_cap_file_->epan, r1->frameData(), r2->frameData(), COL_NUMBER); } } if (sort_order_ == Qt::AscendingOrder) { return cmp_val < 0; } else { return cmp_val > 0; } } // ::data is const so we have to make changes here. void PacketListModel::emitItemHeightChanged(const QModelIndex &ih_index) { if (!ih_index.isValid()) return; PacketListRecord *record = static_cast<PacketListRecord*>(ih_index.internalPointer()); if (!record) return; if (record->lineCount() > max_line_count_) { max_line_count_ = record->lineCount(); emit itemHeightChanged(ih_index); } } int PacketListModel::rowCount(const QModelIndex &parent) const { if (parent.column() >= prefs.num_cols) return 0; return visible_rows_.count(); } int PacketListModel::columnCount(const QModelIndex &) const { return prefs.num_cols; } QVariant PacketListModel::data(const QModelIndex &d_index, int role) const { if (!d_index.isValid()) return QVariant(); PacketListRecord *record = static_cast<PacketListRecord*>(d_index.internalPointer()); if (!record) return QVariant(); const frame_data *fdata = record->frameData(); if (!fdata) return QVariant(); switch (role) { case Qt::TextAlignmentRole: switch(recent_get_column_xalign(d_index.column())) { case COLUMN_XALIGN_RIGHT: return Qt::AlignRight; break; case COLUMN_XALIGN_CENTER: return Qt::AlignCenter; break; case COLUMN_XALIGN_LEFT: return Qt::AlignLeft; break; case COLUMN_XALIGN_DEFAULT: default: if (right_justify_column(d_index.column(), cap_file_)) { return Qt::AlignRight; } break; } return Qt::AlignLeft; case Qt::BackgroundRole: const color_t *color; if (fdata->flags.ignored) { color = &prefs.gui_ignored_bg; } else if (fdata->flags.marked) { color = &prefs.gui_marked_bg; } else if (fdata->color_filter && recent.packet_list_colorize) { const color_filter_t *color_filter = (const color_filter_t *) fdata->color_filter; color = &color_filter->bg_color; } else { return QVariant(); } return ColorUtils::fromColorT(color); case Qt::ForegroundRole: if 
(fdata->flags.ignored) { color = &prefs.gui_ignored_fg; } else if (fdata->flags.marked) { color = &prefs.gui_marked_fg; } else if (fdata->color_filter && recent.packet_list_colorize) { const color_filter_t *color_filter = (const color_filter_t *) fdata->color_filter; color = &color_filter->fg_color; } else { return QVariant(); } return ColorUtils::fromColorT(color); case Qt::DisplayRole: { int column = d_index.column(); QByteArray column_string = record->columnString(cap_file_, column, true); // We don't know an item's sizeHint until we fetch its text here. // Assume each line count is 1. If the line count changes, emit // itemHeightChanged which triggers another redraw (including a // fetch of SizeHintRole and DisplayRole) in the next event loop. if (column == 0 && record->lineCountChanged() && record->lineCount() > max_line_count_) { emit maxLineCountChanged(d_index); } return column_string; } case Qt::SizeHintRole: { // If this is the first row and column, return the maximum row height... if (d_index.row() < 1 && d_index.column() < 1 && max_row_height_ > 0) { QSize size = QSize(-1, max_row_height_); return size; } // ...otherwise punt so that the item delegate can correctly calculate the item width. 
return QVariant(); } default: return QVariant(); } } QVariant PacketListModel::headerData(int section, Qt::Orientation orientation, int role) const { if (!cap_file_) return QVariant(); if (orientation == Qt::Horizontal && section < prefs.num_cols) { switch (role) { case Qt::DisplayRole: return get_column_title(section); default: break; } } return QVariant(); } void PacketListModel::flushVisibleRows() { gint pos = visible_rows_.count(); if (new_visible_rows_.count() > 0) { beginInsertRows(QModelIndex(), pos, pos + new_visible_rows_.count()); foreach (PacketListRecord *record, new_visible_rows_) { frame_data *fdata = record->frameData(); visible_rows_ << record; number_to_row_[fdata->num] = visible_rows_.count() - 1; } endInsertRows(); new_visible_rows_.clear(); } } // Fill our column string and colorization cache while the application is // idle. Try to be as conservative with the CPU and disk as possible. static const int idle_dissection_interval_ = 5; // ms void PacketListModel::dissectIdle(bool reset) { if (reset) { // qDebug() << "=di reset" << idle_dissection_row_; idle_dissection_row_ = 0; } else if (!idle_dissection_timer_->isValid()) { return; } idle_dissection_timer_->restart(); while (idle_dissection_timer_->elapsed() < idle_dissection_interval_ && idle_dissection_row_ < physical_rows_.count()) { ensureRowColorized(idle_dissection_row_); idle_dissection_row_++; // if (idle_dissection_row_ % 1000 == 0) qDebug() << "=di row" << idle_dissection_row_; } if (idle_dissection_row_ < physical_rows_.count()) { QTimer::singleShot(idle_dissection_interval_, this, SLOT(dissectIdle())); } else { idle_dissection_timer_->invalidate(); } } // XXX Pass in cinfo from packet_list_append so that we can fill in // line counts? 
gint PacketListModel::appendPacket(frame_data *fdata) { PacketListRecord *record = new PacketListRecord(fdata); gint pos = -1; physical_rows_ << record; if (fdata->flags.passed_dfilter || fdata->flags.ref_time) { new_visible_rows_ << record; if (new_visible_rows_.count() < 2) { // This is the first queued packet. Schedule an insertion for // the next UI update. QTimer::singleShot(0, this, SLOT(flushVisibleRows())); } pos = visible_rows_.count() + new_visible_rows_.count() - 1; } return pos; } frame_data *PacketListModel::getRowFdata(int row) { if (row < 0 || row >= visible_rows_.count()) return NULL; PacketListRecord *record = visible_rows_[row]; if (!record) return NULL; return record->frameData(); } void PacketListModel::ensureRowColorized(int row) { if (row < 0 || row >= visible_rows_.count()) return; PacketListRecord *record = visible_rows_[row]; if (!record) return; if (!record->colorized()) { record->columnString(cap_file_, 1, true); } } int PacketListModel::visibleIndexOf(frame_data *fdata) const { int row = 0; foreach (PacketListRecord *record, visible_rows_) { if (record->frameData() == fdata) { return row; } row++; } return -1; } /* * Editor modelines * * Local Variables: * c-basic-offset: 4 * tab-width: 8 * indent-tabs-mode: nil * End: * * ex: set shiftwidth=4 tabstop=8 expandtab: * :indentSize=4:tabSize=8:noTabs=true: */
gpl-2.0
crseanpaul/staging
block/blk-throttle.c
259
46883
/* * Interface for controlling IO bandwidth on a request queue * * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/blktrace_api.h> #include "blk-cgroup.h" #include "blk.h" /* Max dispatch from a group in 1 round */ static int throtl_grp_quantum = 8; /* Total max dispatch from all groups in one round */ static int throtl_quantum = 32; /* Throttling is performed over 100ms slice and after that slice is renewed */ static unsigned long throtl_slice = HZ/10; /* 100 ms */ static struct blkcg_policy blkcg_policy_throtl; /* A workqueue to queue throttle related work */ static struct workqueue_struct *kthrotld_workqueue; /* * To implement hierarchical throttling, throtl_grps form a tree and bios * are dispatched upwards level by level until they reach the top and get * issued. When dispatching bios from the children and local group at each * level, if the bios are dispatched into a single bio_list, there's a risk * of a local or child group which can queue many bios at once filling up * the list starving others. * * To avoid such starvation, dispatched bios are queued separately * according to where they came from. When they are again dispatched to * the parent, they're popped in round-robin order so that no single source * hogs the dispatch window. * * throtl_qnode is used to keep the queued bios separated by their sources. * Bios are queued to throtl_qnode which in turn is queued to * throtl_service_queue and then dispatched in round-robin order. * * It's also used to track the reference counts on blkg's. A qnode always * belongs to a throtl_grp and gets queued on itself or the parent, so * incrementing the reference of the associated throtl_grp when a qnode is * queued and decrementing when dequeued is enough to keep the whole blkg * tree pinned while bios are in flight. 
*/ struct throtl_qnode { struct list_head node; /* service_queue->queued[] */ struct bio_list bios; /* queued bios */ struct throtl_grp *tg; /* tg this qnode belongs to */ }; struct throtl_service_queue { struct throtl_service_queue *parent_sq; /* the parent service_queue */ /* * Bios queued directly to this service_queue or dispatched from * children throtl_grp's. */ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ unsigned int nr_queued[2]; /* number of queued bios */ /* * RB tree of active children throtl_grp's, which are sorted by * their ->disptime. */ struct rb_root pending_tree; /* RB tree of active tgs */ struct rb_node *first_pending; /* first node in the tree */ unsigned int nr_pending; /* # queued in the tree */ unsigned long first_pending_disptime; /* disptime of the first tg */ struct timer_list pending_timer; /* fires on first_pending_disptime */ }; enum tg_state_flags { THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ }; #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) /* Per-cpu group stats */ struct tg_stats_cpu { /* total bytes transferred */ struct blkg_rwstat service_bytes; /* total IOs serviced, post merge */ struct blkg_rwstat serviced; }; struct throtl_grp { /* must be the first member */ struct blkg_policy_data pd; /* active throtl group service_queue member */ struct rb_node rb_node; /* throtl_data this group belongs to */ struct throtl_data *td; /* this group's service queue */ struct throtl_service_queue service_queue; /* * qnode_on_self is used when bios are directly queued to this * throtl_grp so that local bios compete fairly with bios * dispatched from children. qnode_on_parent is used when bios are * dispatched from this throtl_grp into its parent and will compete * with the sibling qnode_on_parents and the parent's * qnode_on_self. 
*/ struct throtl_qnode qnode_on_self[2]; struct throtl_qnode qnode_on_parent[2]; /* * Dispatch time in jiffies. This is the estimated time when group * will unthrottle and is ready to dispatch more bio. It is used as * key to sort active groups in service tree. */ unsigned long disptime; unsigned int flags; /* are there any throtl rules between this group and td? */ bool has_rules[2]; /* bytes per second rate limits */ uint64_t bps[2]; /* IOPS limits */ unsigned int iops[2]; /* Number of bytes disptached in current slice */ uint64_t bytes_disp[2]; /* Number of bio's dispatched in current slice */ unsigned int io_disp[2]; /* When did we start a new slice */ unsigned long slice_start[2]; unsigned long slice_end[2]; /* Per cpu stats pointer */ struct tg_stats_cpu __percpu *stats_cpu; /* List of tgs waiting for per cpu stats memory to be allocated */ struct list_head stats_alloc_node; }; struct throtl_data { /* service tree for active throtl groups */ struct throtl_service_queue service_queue; struct request_queue *queue; /* Total Number of queued bios on READ and WRITE lists */ unsigned int nr_queued[2]; /* * number of total undestroyed groups */ unsigned int nr_undestroyed_grps; /* Work for dispatching throttled bios */ struct work_struct dispatch_work; }; /* list and work item to allocate percpu group stats */ static DEFINE_SPINLOCK(tg_stats_alloc_lock); static LIST_HEAD(tg_stats_alloc_list); static void tg_stats_alloc_fn(struct work_struct *); static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn); static void throtl_pending_timer_fn(unsigned long arg); static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) { return pd ? 
container_of(pd, struct throtl_grp, pd) : NULL; } static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) { return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); } static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) { return pd_to_blkg(&tg->pd); } static inline struct throtl_grp *td_root_tg(struct throtl_data *td) { return blkg_to_tg(td->queue->root_blkg); } /** * sq_to_tg - return the throl_grp the specified service queue belongs to * @sq: the throtl_service_queue of interest * * Return the throtl_grp @sq belongs to. If @sq is the top-level one * embedded in throtl_data, %NULL is returned. */ static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) { if (sq && sq->parent_sq) return container_of(sq, struct throtl_grp, service_queue); else return NULL; } /** * sq_to_td - return throtl_data the specified service queue belongs to * @sq: the throtl_service_queue of interest * * A service_queue can be embeded in either a throtl_grp or throtl_data. * Determine the associated throtl_data accordingly and return it. */ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) { struct throtl_grp *tg = sq_to_tg(sq); if (tg) return tg->td; else return container_of(sq, struct throtl_data, service_queue); } /** * throtl_log - log debug message via blktrace * @sq: the service_queue being reported * @fmt: printf format string * @args: printf args * * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a * throtl_grp; otherwise, just "throtl". * * TODO: this should be made a function and name formatting should happen * after testing whether blktrace is enabled. */ #define throtl_log(sq, fmt, args...) 
do { \ struct throtl_grp *__tg = sq_to_tg((sq)); \ struct throtl_data *__td = sq_to_td((sq)); \ \ (void)__td; \ if ((__tg)) { \ char __pbuf[128]; \ \ blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \ blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \ } else { \ blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \ } \ } while (0) static void tg_stats_init(struct tg_stats_cpu *tg_stats) { blkg_rwstat_init(&tg_stats->service_bytes); blkg_rwstat_init(&tg_stats->serviced); } /* * Worker for allocating per cpu stat for tgs. This is scheduled on the * system_wq once there are some groups on the alloc_list waiting for * allocation. */ static void tg_stats_alloc_fn(struct work_struct *work) { static struct tg_stats_cpu *stats_cpu; /* this fn is non-reentrant */ struct delayed_work *dwork = to_delayed_work(work); bool empty = false; alloc_stats: if (!stats_cpu) { int cpu; stats_cpu = alloc_percpu(struct tg_stats_cpu); if (!stats_cpu) { /* allocation failed, try again after some time */ schedule_delayed_work(dwork, msecs_to_jiffies(10)); return; } for_each_possible_cpu(cpu) tg_stats_init(per_cpu_ptr(stats_cpu, cpu)); } spin_lock_irq(&tg_stats_alloc_lock); if (!list_empty(&tg_stats_alloc_list)) { struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list, struct throtl_grp, stats_alloc_node); swap(tg->stats_cpu, stats_cpu); list_del_init(&tg->stats_alloc_node); } empty = list_empty(&tg_stats_alloc_list); spin_unlock_irq(&tg_stats_alloc_lock); if (!empty) goto alloc_stats; } static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) { INIT_LIST_HEAD(&qn->node); bio_list_init(&qn->bios); qn->tg = tg; } /** * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it * @bio: bio being added * @qn: qnode to add bio to * @queued: the service_queue->queued[] list @qn belongs to * * Add @bio to @qn and put @qn on @queued if it's not already on. * @qn->tg's reference count is bumped when @qn is activated. 
See the * comment on top of throtl_qnode definition for details. */ static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, struct list_head *queued) { bio_list_add(&qn->bios, bio); if (list_empty(&qn->node)) { list_add_tail(&qn->node, queued); blkg_get(tg_to_blkg(qn->tg)); } } /** * throtl_peek_queued - peek the first bio on a qnode list * @queued: the qnode list to peek */ static struct bio *throtl_peek_queued(struct list_head *queued) { struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); struct bio *bio; if (list_empty(queued)) return NULL; bio = bio_list_peek(&qn->bios); WARN_ON_ONCE(!bio); return bio; } /** * throtl_pop_queued - pop the first bio form a qnode list * @queued: the qnode list to pop a bio from * @tg_to_put: optional out argument for throtl_grp to put * * Pop the first bio from the qnode list @queued. After popping, the first * qnode is removed from @queued if empty or moved to the end of @queued so * that the popping order is round-robin. * * When the first qnode is removed, its associated throtl_grp should be put * too. If @tg_to_put is NULL, this function automatically puts it; * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is * responsible for putting it. 
*/ static struct bio *throtl_pop_queued(struct list_head *queued, struct throtl_grp **tg_to_put) { struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); struct bio *bio; if (list_empty(queued)) return NULL; bio = bio_list_pop(&qn->bios); WARN_ON_ONCE(!bio); if (bio_list_empty(&qn->bios)) { list_del_init(&qn->node); if (tg_to_put) *tg_to_put = qn->tg; else blkg_put(tg_to_blkg(qn->tg)); } else { list_move_tail(&qn->node, queued); } return bio; } /* init a service_queue, assumes the caller zeroed it */ static void throtl_service_queue_init(struct throtl_service_queue *sq, struct throtl_service_queue *parent_sq) { INIT_LIST_HEAD(&sq->queued[0]); INIT_LIST_HEAD(&sq->queued[1]); sq->pending_tree = RB_ROOT; sq->parent_sq = parent_sq; setup_timer(&sq->pending_timer, throtl_pending_timer_fn, (unsigned long)sq); } static void throtl_service_queue_exit(struct throtl_service_queue *sq) { del_timer_sync(&sq->pending_timer); } static void throtl_pd_init(struct blkcg_gq *blkg) { struct throtl_grp *tg = blkg_to_tg(blkg); struct throtl_data *td = blkg->q->td; struct throtl_service_queue *parent_sq; unsigned long flags; int rw; /* * If on the default hierarchy, we switch to properly hierarchical * behavior where limits on a given throtl_grp are applied to the * whole subtree rather than just the group itself. e.g. If 16M * read_bps limit is set on the root group, the whole system can't * exceed 16M for the device. * * If not on the default hierarchy, the broken flat hierarchy * behavior is retained where all throtl_grps are treated as if * they're all separate root groups right below throtl_data. * Limits of a group don't interact with limits of other groups * regardless of the position of the group in the hierarchy. 
*/ parent_sq = &td->service_queue; if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent) parent_sq = &blkg_to_tg(blkg->parent)->service_queue; throtl_service_queue_init(&tg->service_queue, parent_sq); for (rw = READ; rw <= WRITE; rw++) { throtl_qnode_init(&tg->qnode_on_self[rw], tg); throtl_qnode_init(&tg->qnode_on_parent[rw], tg); } RB_CLEAR_NODE(&tg->rb_node); tg->td = td; tg->bps[READ] = -1; tg->bps[WRITE] = -1; tg->iops[READ] = -1; tg->iops[WRITE] = -1; /* * Ugh... We need to perform per-cpu allocation for tg->stats_cpu * but percpu allocator can't be called from IO path. Queue tg on * tg_stats_alloc_list and allocate from work item. */ spin_lock_irqsave(&tg_stats_alloc_lock, flags); list_add(&tg->stats_alloc_node, &tg_stats_alloc_list); schedule_delayed_work(&tg_stats_alloc_work, 0); spin_unlock_irqrestore(&tg_stats_alloc_lock, flags); } /* * Set has_rules[] if @tg or any of its parents have limits configured. * This doesn't require walking up to the top of the hierarchy as the * parent's has_rules[] is guaranteed to be correct. */ static void tg_update_has_rules(struct throtl_grp *tg) { struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); int rw; for (rw = READ; rw <= WRITE; rw++) tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) || (tg->bps[rw] != -1 || tg->iops[rw] != -1); } static void throtl_pd_online(struct blkcg_gq *blkg) { /* * We don't want new groups to escape the limits of its ancestors. * Update has_rules[] after a new group is brought online. 
*/ tg_update_has_rules(blkg_to_tg(blkg)); } static void throtl_pd_exit(struct blkcg_gq *blkg) { struct throtl_grp *tg = blkg_to_tg(blkg); unsigned long flags; spin_lock_irqsave(&tg_stats_alloc_lock, flags); list_del_init(&tg->stats_alloc_node); spin_unlock_irqrestore(&tg_stats_alloc_lock, flags); free_percpu(tg->stats_cpu); throtl_service_queue_exit(&tg->service_queue); } static void throtl_pd_reset_stats(struct blkcg_gq *blkg) { struct throtl_grp *tg = blkg_to_tg(blkg); int cpu; if (tg->stats_cpu == NULL) return; for_each_possible_cpu(cpu) { struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); blkg_rwstat_reset(&sc->service_bytes); blkg_rwstat_reset(&sc->serviced); } } static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkcg *blkcg) { /* * This is the common case when there are no blkcgs. Avoid lookup * in this case */ if (blkcg == &blkcg_root) return td_root_tg(td); return blkg_to_tg(blkg_lookup(blkcg, td->queue)); } static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td, struct blkcg *blkcg) { struct request_queue *q = td->queue; struct throtl_grp *tg = NULL; /* * This is the common case when there are no blkcgs. 
Avoid lookup * in this case */ if (blkcg == &blkcg_root) { tg = td_root_tg(td); } else { struct blkcg_gq *blkg; blkg = blkg_lookup_create(blkcg, q); /* if %NULL and @q is alive, fall back to root_tg */ if (!IS_ERR(blkg)) tg = blkg_to_tg(blkg); else if (!blk_queue_dying(q)) tg = td_root_tg(td); } return tg; } static struct throtl_grp * throtl_rb_first(struct throtl_service_queue *parent_sq) { /* Service tree is empty */ if (!parent_sq->nr_pending) return NULL; if (!parent_sq->first_pending) parent_sq->first_pending = rb_first(&parent_sq->pending_tree); if (parent_sq->first_pending) return rb_entry_tg(parent_sq->first_pending); return NULL; } static void rb_erase_init(struct rb_node *n, struct rb_root *root) { rb_erase(n, root); RB_CLEAR_NODE(n); } static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *parent_sq) { if (parent_sq->first_pending == n) parent_sq->first_pending = NULL; rb_erase_init(n, &parent_sq->pending_tree); --parent_sq->nr_pending; } static void update_min_dispatch_time(struct throtl_service_queue *parent_sq) { struct throtl_grp *tg; tg = throtl_rb_first(parent_sq); if (!tg) return; parent_sq->first_pending_disptime = tg->disptime; } static void tg_service_queue_add(struct throtl_grp *tg) { struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; struct rb_node **node = &parent_sq->pending_tree.rb_node; struct rb_node *parent = NULL; struct throtl_grp *__tg; unsigned long key = tg->disptime; int left = 1; while (*node != NULL) { parent = *node; __tg = rb_entry_tg(parent); if (time_before(key, __tg->disptime)) node = &parent->rb_left; else { node = &parent->rb_right; left = 0; } } if (left) parent_sq->first_pending = &tg->rb_node; rb_link_node(&tg->rb_node, parent, node); rb_insert_color(&tg->rb_node, &parent_sq->pending_tree); } static void __throtl_enqueue_tg(struct throtl_grp *tg) { tg_service_queue_add(tg); tg->flags |= THROTL_TG_PENDING; tg->service_queue.parent_sq->nr_pending++; } static void 
throtl_enqueue_tg(struct throtl_grp *tg) { if (!(tg->flags & THROTL_TG_PENDING)) __throtl_enqueue_tg(tg); } static void __throtl_dequeue_tg(struct throtl_grp *tg) { throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); tg->flags &= ~THROTL_TG_PENDING; } static void throtl_dequeue_tg(struct throtl_grp *tg) { if (tg->flags & THROTL_TG_PENDING) __throtl_dequeue_tg(tg); } /* Call with queue lock held */ static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, unsigned long expires) { mod_timer(&sq->pending_timer, expires); throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu", expires - jiffies, jiffies); } /** * throtl_schedule_next_dispatch - schedule the next dispatch cycle * @sq: the service_queue to schedule dispatch for * @force: force scheduling * * Arm @sq->pending_timer so that the next dispatch cycle starts on the * dispatch time of the first pending child. Returns %true if either timer * is armed or there's no pending child left. %false if the current * dispatch window is still open and the caller should continue * dispatching. * * If @force is %true, the dispatch timer is always scheduled and this * function is guaranteed to return %true. This is to be used when the * caller can't dispatch itself and needs to invoke pending_timer * unconditionally. Note that forced scheduling is likely to induce short * delay before dispatch starts even if @sq->first_pending_disptime is not * in the future and thus shouldn't be used in hot paths. */ static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq, bool force) { /* any pending children left? */ if (!sq->nr_pending) return true; update_min_dispatch_time(sq); /* is the next dispatch time in the future? 
*/ if (force || time_after(sq->first_pending_disptime, jiffies)) { throtl_schedule_pending_timer(sq, sq->first_pending_disptime); return true; } /* tell the caller to continue dispatching */ return false; } static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, bool rw, unsigned long start) { tg->bytes_disp[rw] = 0; tg->io_disp[rw] = 0; /* * Previous slice has expired. We must have trimmed it after last * bio dispatch. That means since start of last slice, we never used * that bandwidth. Do try to make use of that bandwidth while giving * credit. */ if (time_after_eq(start, tg->slice_start[rw])) tg->slice_start[rw] = start; tg->slice_end[rw] = jiffies + throtl_slice; throtl_log(&tg->service_queue, "[%c] new slice with credit start=%lu end=%lu jiffies=%lu", rw == READ ? 'R' : 'W', tg->slice_start[rw], tg->slice_end[rw], jiffies); } static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) { tg->bytes_disp[rw] = 0; tg->io_disp[rw] = 0; tg->slice_start[rw] = jiffies; tg->slice_end[rw] = jiffies + throtl_slice; throtl_log(&tg->service_queue, "[%c] new slice start=%lu end=%lu jiffies=%lu", rw == READ ? 'R' : 'W', tg->slice_start[rw], tg->slice_end[rw], jiffies); } static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, unsigned long jiffy_end) { tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); } static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, unsigned long jiffy_end) { tg->slice_end[rw] = roundup(jiffy_end, throtl_slice); throtl_log(&tg->service_queue, "[%c] extend slice start=%lu end=%lu jiffies=%lu", rw == READ ? 
'R' : 'W', tg->slice_start[rw], tg->slice_end[rw], jiffies); } /* Determine if previously allocated or extended slice is complete or not */ static bool throtl_slice_used(struct throtl_grp *tg, bool rw) { if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) return false; return 1; } /* Trim the used slices and adjust slice start accordingly */ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) { unsigned long nr_slices, time_elapsed, io_trim; u64 bytes_trim, tmp; BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); /* * If bps are unlimited (-1), then time slice don't get * renewed. Don't try to trim the slice if slice is used. A new * slice will start when appropriate. */ if (throtl_slice_used(tg, rw)) return; /* * A bio has been dispatched. Also adjust slice_end. It might happen * that initially cgroup limit was very low resulting in high * slice_end, but later limit was bumped up and bio was dispached * sooner, then we need to reduce slice_end. A high bogus slice_end * is bad because it does not allow new slice to start. */ throtl_set_slice_end(tg, rw, jiffies + throtl_slice); time_elapsed = jiffies - tg->slice_start[rw]; nr_slices = time_elapsed / throtl_slice; if (!nr_slices) return; tmp = tg->bps[rw] * throtl_slice * nr_slices; do_div(tmp, HZ); bytes_trim = tmp; io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ; if (!bytes_trim && !io_trim) return; if (tg->bytes_disp[rw] >= bytes_trim) tg->bytes_disp[rw] -= bytes_trim; else tg->bytes_disp[rw] = 0; if (tg->io_disp[rw] >= io_trim) tg->io_disp[rw] -= io_trim; else tg->io_disp[rw] = 0; tg->slice_start[rw] += nr_slices * throtl_slice; throtl_log(&tg->service_queue, "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu", rw == READ ? 
'R' : 'W', nr_slices, bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw], jiffies); } static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, unsigned long *wait) { bool rw = bio_data_dir(bio); unsigned int io_allowed; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; u64 tmp; jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; /* Slice has just started. Consider one slice interval */ if (!jiffy_elapsed) jiffy_elapsed_rnd = throtl_slice; jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice); /* * jiffy_elapsed_rnd should not be a big value as minimum iops can be * 1 then at max jiffy elapsed should be equivalent of 1 second as we * will allow dispatch after 1 second and after that slice should * have been trimmed. */ tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd; do_div(tmp, HZ); if (tmp > UINT_MAX) io_allowed = UINT_MAX; else io_allowed = tmp; if (tg->io_disp[rw] + 1 <= io_allowed) { if (wait) *wait = 0; return true; } /* Calc approx time to dispatch */ jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1; if (jiffy_wait > jiffy_elapsed) jiffy_wait = jiffy_wait - jiffy_elapsed; else jiffy_wait = 1; if (wait) *wait = jiffy_wait; return 0; } static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, unsigned long *wait) { bool rw = bio_data_dir(bio); u64 bytes_allowed, extra_bytes, tmp; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; /* Slice has just started. 
Consider one slice interval */ if (!jiffy_elapsed) jiffy_elapsed_rnd = throtl_slice; jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice); tmp = tg->bps[rw] * jiffy_elapsed_rnd; do_div(tmp, HZ); bytes_allowed = tmp; if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (wait) *wait = 0; return true; } /* Calc approx time to dispatch */ extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); if (!jiffy_wait) jiffy_wait = 1; /* * This wait time is without taking into consideration the rounding * up we did. Add that time also. */ jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed); if (wait) *wait = jiffy_wait; return 0; } /* * Returns whether one can dispatch a bio or not. Also returns approx number * of jiffies to wait before this bio is with-in IO rate and can be dispatched */ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, unsigned long *wait) { bool rw = bio_data_dir(bio); unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0; /* * Currently whole state machine of group depends on first bio * queued in the group bio list. So one should not be calling * this function with a different bio if there are other bios * queued. */ BUG_ON(tg->service_queue.nr_queued[rw] && bio != throtl_peek_queued(&tg->service_queue.queued[rw])); /* If tg->bps = -1, then BW is unlimited */ if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { if (wait) *wait = 0; return true; } /* * If previous slice expired, start a new one otherwise renew/extend * existing slice to make sure it is at least throtl_slice interval * long since now. 
*/ if (throtl_slice_used(tg, rw)) throtl_start_new_slice(tg, rw); else { if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) throtl_extend_slice(tg, rw, jiffies + throtl_slice); } if (tg_with_in_bps_limit(tg, bio, &bps_wait) && tg_with_in_iops_limit(tg, bio, &iops_wait)) { if (wait) *wait = 0; return 1; } max_wait = max(bps_wait, iops_wait); if (wait) *wait = max_wait; if (time_before(tg->slice_end[rw], jiffies + max_wait)) throtl_extend_slice(tg, rw, jiffies + max_wait); return 0; } static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes, int rw) { struct throtl_grp *tg = blkg_to_tg(blkg); struct tg_stats_cpu *stats_cpu; unsigned long flags; /* If per cpu stats are not allocated yet, don't do any accounting. */ if (tg->stats_cpu == NULL) return; /* * Disabling interrupts to provide mutual exclusion between two * writes on same cpu. It probably is not needed for 64bit. Not * optimizing that case yet. */ local_irq_save(flags); stats_cpu = this_cpu_ptr(tg->stats_cpu); blkg_rwstat_add(&stats_cpu->serviced, rw, 1); blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes); local_irq_restore(flags); } static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); /* Charge the bio to the group */ tg->bytes_disp[rw] += bio->bi_iter.bi_size; tg->io_disp[rw]++; /* * REQ_THROTTLED is used to prevent the same bio to be throttled * more than once as a throttled bio will go through blk-throtl the * second time when it eventually gets issued. Set it when a bio * is being charged to a tg. * * Dispatch stats aren't recursive and each @bio should only be * accounted by the @tg it was originally associated with. Let's * update the stats when setting REQ_THROTTLED for the first time * which is guaranteed to be for the @bio's original tg. 
*/ if (!(bio->bi_rw & REQ_THROTTLED)) { bio->bi_rw |= REQ_THROTTLED; throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_iter.bi_size, bio->bi_rw); } } /** * throtl_add_bio_tg - add a bio to the specified throtl_grp * @bio: bio to add * @qn: qnode to use * @tg: the target throtl_grp * * Add @bio to @tg's service_queue using @qn. If @qn is not specified, * tg->qnode_on_self[] is used. */ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, struct throtl_grp *tg) { struct throtl_service_queue *sq = &tg->service_queue; bool rw = bio_data_dir(bio); if (!qn) qn = &tg->qnode_on_self[rw]; /* * If @tg doesn't currently have any bios queued in the same * direction, queueing @bio can change when @tg should be * dispatched. Mark that @tg was empty. This is automatically * cleaered on the next tg_update_disptime(). */ if (!sq->nr_queued[rw]) tg->flags |= THROTL_TG_WAS_EMPTY; throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; throtl_enqueue_tg(tg); } static void tg_update_disptime(struct throtl_grp *tg) { struct throtl_service_queue *sq = &tg->service_queue; unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; struct bio *bio; if ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_may_dispatch(tg, bio, &read_wait); if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_may_dispatch(tg, bio, &write_wait); min_wait = min(read_wait, write_wait); disptime = jiffies + min_wait; /* Update dispatch time */ throtl_dequeue_tg(tg); tg->disptime = disptime; throtl_enqueue_tg(tg); /* see throtl_add_bio_tg() */ tg->flags &= ~THROTL_TG_WAS_EMPTY; } static void start_parent_slice_with_credit(struct throtl_grp *child_tg, struct throtl_grp *parent_tg, bool rw) { if (throtl_slice_used(parent_tg, rw)) { throtl_start_new_slice_with_credit(parent_tg, rw, child_tg->slice_start[rw]); } } static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) { struct throtl_service_queue *sq = &tg->service_queue; struct throtl_service_queue *parent_sq = 
sq->parent_sq; struct throtl_grp *parent_tg = sq_to_tg(parent_sq); struct throtl_grp *tg_to_put = NULL; struct bio *bio; /* * @bio is being transferred from @tg to @parent_sq. Popping a bio * from @tg may put its reference and @parent_sq might end up * getting released prematurely. Remember the tg to put and put it * after @bio is transferred to @parent_sq. */ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq->nr_queued[rw]--; throtl_charge_bio(tg, bio); /* * If our parent is another tg, we just need to transfer @bio to * the parent using throtl_add_bio_tg(). If our parent is * @td->service_queue, @bio is ready to be issued. Put it on its * bio_lists[] and decrease total number queued. The caller is * responsible for issuing these bios. */ if (parent_tg) { throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg); start_parent_slice_with_credit(tg, parent_tg, rw); } else { throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw], &parent_sq->queued[rw]); BUG_ON(tg->td->nr_queued[rw] <= 0); tg->td->nr_queued[rw]--; } throtl_trim_slice(tg, rw); if (tg_to_put) blkg_put(tg_to_blkg(tg_to_put)); } static int throtl_dispatch_tg(struct throtl_grp *tg) { struct throtl_service_queue *sq = &tg->service_queue; unsigned int nr_reads = 0, nr_writes = 0; unsigned int max_nr_reads = throtl_grp_quantum*3/4; unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads; struct bio *bio; /* Try to dispatch 75% READS and 25% WRITES */ while ((bio = throtl_peek_queued(&sq->queued[READ])) && tg_may_dispatch(tg, bio, NULL)) { tg_dispatch_one_bio(tg, bio_data_dir(bio)); nr_reads++; if (nr_reads >= max_nr_reads) break; } while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && tg_may_dispatch(tg, bio, NULL)) { tg_dispatch_one_bio(tg, bio_data_dir(bio)); nr_writes++; if (nr_writes >= max_nr_writes) break; } return nr_reads + nr_writes; } static int throtl_select_dispatch(struct throtl_service_queue *parent_sq) { unsigned int nr_disp = 0; while (1) { struct throtl_grp *tg = 
throtl_rb_first(parent_sq); struct throtl_service_queue *sq = &tg->service_queue; if (!tg) break; if (time_before(jiffies, tg->disptime)) break; throtl_dequeue_tg(tg); nr_disp += throtl_dispatch_tg(tg); if (sq->nr_queued[0] || sq->nr_queued[1]) tg_update_disptime(tg); if (nr_disp >= throtl_quantum) break; } return nr_disp; } /** * throtl_pending_timer_fn - timer function for service_queue->pending_timer * @arg: the throtl_service_queue being serviced * * This timer is armed when a child throtl_grp with active bio's become * pending and queued on the service_queue's pending_tree and expires when * the first child throtl_grp should be dispatched. This function * dispatches bio's from the children throtl_grps to the parent * service_queue. * * If the parent's parent is another throtl_grp, dispatching is propagated * by either arming its pending_timer or repeating dispatch directly. If * the top-level service_tree is reached, throtl_data->dispatch_work is * kicked so that the ready bio's are issued. 
 */
static void throtl_pending_timer_fn(unsigned long arg)
{
	struct throtl_service_queue *sq = (void *)arg;
	struct throtl_grp *tg = sq_to_tg(sq);	/* NULL for the top-level sq */
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(q->queue_lock);
again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(q->queue_lock);
		cpu_relax();
		spin_lock_irq(q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
		if (tg->flags & THROTL_TG_WAS_EMPTY) {
			tg_update_disptime(tg);
			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
				/* window is already open, repeat dispatching */
				sq = parent_sq;
				tg = sq_to_tg(sq);
				goto again;
			}
		}
	} else {
		/* reached the top-level, queue issuing */
		queue_work(kthrotld_workqueue, &td->dispatch_work);
	}
out_unlock:
	spin_unlock_irq(q->queue_lock);
}

/**
 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
 * @work: work item being executed
 *
 * This function is queued for execution when bio's reach the bio_lists[]
 * of throtl_data->service_queue.  Those bio's are ready and issued by this
 * function.
*/ static void blk_throtl_dispatch_work_fn(struct work_struct *work) { struct throtl_data *td = container_of(work, struct throtl_data, dispatch_work); struct throtl_service_queue *td_sq = &td->service_queue; struct request_queue *q = td->queue; struct bio_list bio_list_on_stack; struct bio *bio; struct blk_plug plug; int rw; bio_list_init(&bio_list_on_stack); spin_lock_irq(q->queue_lock); for (rw = READ; rw <= WRITE; rw++) while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) bio_list_add(&bio_list_on_stack, bio); spin_unlock_irq(q->queue_lock); if (!bio_list_empty(&bio_list_on_stack)) { blk_start_plug(&plug); while((bio = bio_list_pop(&bio_list_on_stack))) generic_make_request(bio); blk_finish_plug(&plug); } } static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct throtl_grp *tg = pd_to_tg(pd); struct blkg_rwstat rwstat = { }, tmp; int i, cpu; for_each_possible_cpu(cpu) { struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); tmp = blkg_rwstat_read((void *)sc + off); for (i = 0; i < BLKG_RWSTAT_NR; i++) rwstat.cnt[i] += tmp.cnt[i]; } return __blkg_prfill_rwstat(sf, pd, &rwstat); } static int tg_print_cpu_rwstat(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat, &blkcg_policy_throtl, seq_cft(sf)->private, true); return 0; } static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct throtl_grp *tg = pd_to_tg(pd); u64 v = *(u64 *)((void *)tg + off); if (v == -1) return 0; return __blkg_prfill_u64(sf, pd, v); } static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, int off) { struct throtl_grp *tg = pd_to_tg(pd); unsigned int v = *(unsigned int *)((void *)tg + off); if (v == -1) return 0; return __blkg_prfill_u64(sf, pd, v); } static int tg_print_conf_u64(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64, &blkcg_policy_throtl, seq_cft(sf)->private, 
false); return 0; } static int tg_print_conf_uint(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint, &blkcg_policy_throtl, seq_cft(sf)->private, false); return 0; } static ssize_t tg_set_conf(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, bool is_u64) { struct blkcg *blkcg = css_to_blkcg(of_css(of)); struct blkg_conf_ctx ctx; struct throtl_grp *tg; struct throtl_service_queue *sq; struct blkcg_gq *blkg; struct cgroup_subsys_state *pos_css; int ret; ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); if (ret) return ret; tg = blkg_to_tg(ctx.blkg); sq = &tg->service_queue; if (!ctx.v) ctx.v = -1; if (is_u64) *(u64 *)((void *)tg + of_cft(of)->private) = ctx.v; else *(unsigned int *)((void *)tg + of_cft(of)->private) = ctx.v; throtl_log(&tg->service_queue, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE], tg->iops[READ], tg->iops[WRITE]); /* * Update has_rules[] flags for the updated tg's subtree. A tg is * considered to have rules if either the tg itself or any of its * ancestors has rules. This identifies groups without any * restrictions in the whole hierarchy and allows them to bypass * blk-throttle. */ blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg) tg_update_has_rules(blkg_to_tg(blkg)); /* * We're already holding queue_lock and know @tg is valid. Let's * apply the new config directly. * * Restart the slices for both READ and WRITES. It might happen * that a group's limit are dropped suddenly and we don't want to * account recently dispatched IO with new low rate. 
*/ throtl_start_new_slice(tg, 0); throtl_start_new_slice(tg, 1); if (tg->flags & THROTL_TG_PENDING) { tg_update_disptime(tg); throtl_schedule_next_dispatch(sq->parent_sq, true); } blkg_conf_finish(&ctx); return nbytes; } static ssize_t tg_set_conf_u64(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { return tg_set_conf(of, buf, nbytes, off, true); } static ssize_t tg_set_conf_uint(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { return tg_set_conf(of, buf, nbytes, off, false); } static struct cftype throtl_files[] = { { .name = "throttle.read_bps_device", .private = offsetof(struct throtl_grp, bps[READ]), .seq_show = tg_print_conf_u64, .write = tg_set_conf_u64, }, { .name = "throttle.write_bps_device", .private = offsetof(struct throtl_grp, bps[WRITE]), .seq_show = tg_print_conf_u64, .write = tg_set_conf_u64, }, { .name = "throttle.read_iops_device", .private = offsetof(struct throtl_grp, iops[READ]), .seq_show = tg_print_conf_uint, .write = tg_set_conf_uint, }, { .name = "throttle.write_iops_device", .private = offsetof(struct throtl_grp, iops[WRITE]), .seq_show = tg_print_conf_uint, .write = tg_set_conf_uint, }, { .name = "throttle.io_service_bytes", .private = offsetof(struct tg_stats_cpu, service_bytes), .seq_show = tg_print_cpu_rwstat, }, { .name = "throttle.io_serviced", .private = offsetof(struct tg_stats_cpu, serviced), .seq_show = tg_print_cpu_rwstat, }, { } /* terminate */ }; static void throtl_shutdown_wq(struct request_queue *q) { struct throtl_data *td = q->td; cancel_work_sync(&td->dispatch_work); } static struct blkcg_policy blkcg_policy_throtl = { .pd_size = sizeof(struct throtl_grp), .cftypes = throtl_files, .pd_init_fn = throtl_pd_init, .pd_online_fn = throtl_pd_online, .pd_exit_fn = throtl_pd_exit, .pd_reset_stats_fn = throtl_pd_reset_stats, }; bool blk_throtl_bio(struct request_queue *q, struct bio *bio) { struct throtl_data *td = q->td; struct throtl_qnode *qn = NULL; struct throtl_grp *tg; struct 
throtl_service_queue *sq; bool rw = bio_data_dir(bio); struct blkcg *blkcg; bool throttled = false; /* see throtl_charge_bio() */ if (bio->bi_rw & REQ_THROTTLED) goto out; /* * A throtl_grp pointer retrieved under rcu can be used to access * basic fields like stats and io rates. If a group has no rules, * just update the dispatch stats in lockless manner and return. */ rcu_read_lock(); blkcg = bio_blkcg(bio); tg = throtl_lookup_tg(td, blkcg); if (tg) { if (!tg->has_rules[rw]) { throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_iter.bi_size, bio->bi_rw); goto out_unlock_rcu; } } /* * Either group has not been allocated yet or it is not an unlimited * IO group */ spin_lock_irq(q->queue_lock); tg = throtl_lookup_create_tg(td, blkcg); if (unlikely(!tg)) goto out_unlock; sq = &tg->service_queue; while (true) { /* throtl is FIFO - if bios are already queued, should queue */ if (sq->nr_queued[rw]) break; /* if above limits, break to queue */ if (!tg_may_dispatch(tg, bio, NULL)) break; /* within limits, let's charge and dispatch directly */ throtl_charge_bio(tg, bio); /* * We need to trim slice even when bios are not being queued * otherwise it might happen that a bio is not queued for * a long time and slice keeps on extending and trim is not * called for a long time. Now if limits are reduced suddenly * we take into account all the IO dispatched so far at new * low rate and * newly queued IO gets a really long dispatch * time. * * So keep on trimming slice even if bio is not queued. */ throtl_trim_slice(tg, rw); /* * @bio passed through this layer without being throttled. * Climb up the ladder. If we''re already at the top, it * can be executed directly. */ qn = &tg->qnode_on_parent[rw]; sq = sq->parent_sq; tg = sq_to_tg(sq); if (!tg) goto out_unlock; } /* out-of-limit, queue to @tg */ throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", rw == READ ? 
'R' : 'W', tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], tg->io_disp[rw], tg->iops[rw], sq->nr_queued[READ], sq->nr_queued[WRITE]); bio_associate_current(bio); tg->td->nr_queued[rw]++; throtl_add_bio_tg(bio, qn, tg); throttled = true; /* * Update @tg's dispatch time and force schedule dispatch if @tg * was empty before @bio. The forced scheduling isn't likely to * cause undue delay as @bio is likely to be dispatched directly if * its @tg's disptime is not in the future. */ if (tg->flags & THROTL_TG_WAS_EMPTY) { tg_update_disptime(tg); throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); } out_unlock: spin_unlock_irq(q->queue_lock); out_unlock_rcu: rcu_read_unlock(); out: /* * As multiple blk-throtls may stack in the same issue path, we * don't want bios to leave with the flag set. Clear the flag if * being issued. */ if (!throttled) bio->bi_rw &= ~REQ_THROTTLED; return throttled; } /* * Dispatch all bios from all children tg's queued on @parent_sq. On * return, @parent_sq is guaranteed to not have any active children tg's * and all bios from previously active tg's are on @parent_sq->bio_lists[]. */ static void tg_drain_bios(struct throtl_service_queue *parent_sq) { struct throtl_grp *tg; while ((tg = throtl_rb_first(parent_sq))) { struct throtl_service_queue *sq = &tg->service_queue; struct bio *bio; throtl_dequeue_tg(tg); while ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_dispatch_one_bio(tg, bio_data_dir(bio)); while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_dispatch_one_bio(tg, bio_data_dir(bio)); } } /** * blk_throtl_drain - drain throttled bios * @q: request_queue to drain throttled bios for * * Dispatch all currently throttled bios on @q through ->make_request_fn(). 
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
	struct bio *bio;
	int rw;

	queue_lockdep_assert_held(q);
	rcu_read_lock();

	/*
	 * Drain each tg while doing post-order walk on the blkg tree, so
	 * that all bios are propagated to td->service_queue.  It'd be
	 * better to walk service_queue tree directly but blkg walk is
	 * easier.
	 */
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);

	/* finally, transfer bios from top-level tg's into the td */
	tg_drain_bios(&td->service_queue);

	rcu_read_unlock();
	/* drop queue_lock while issuing — generic_make_request() may sleep */
	spin_unlock_irq(q->queue_lock);

	/* all bios now should be in td->service_queue, issue them */
	for (rw = READ; rw <= WRITE; rw++)
		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
						NULL)))
			generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}

/*
 * Allocate and attach per-queue throttle state, then activate the blkcg
 * throttle policy on @q.  Returns 0 or a negative errno.
 */
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
	throtl_service_queue_init(&td->service_queue, NULL);

	q->td = td;
	td->queue = q;

	/* activate policy */
	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret)
		kfree(td);
	/*
	 * NOTE(review): on failure q->td is left pointing at freed memory;
	 * presumably callers tear the queue down on error — confirm no
	 * later path reads q->td after a failed init.
	 */
	return ret;
}

/* Tear down throttle state: flush dispatch work, deactivate policy, free td. */
void blk_throtl_exit(struct request_queue *q)
{
	BUG_ON(!q->td);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
	kfree(q->td);
}

/* Module init: create the dispatch workqueue and register the blkcg policy. */
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
}

module_init(throtl_init);
gpl-2.0
alianmohammad/linux-kernel-3.18-hacks
drivers/net/ethernet/sfc/mcdi.c
515
51937
/**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2008-2013 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/delay.h> #include <asm/cmpxchg.h> #include "net_driver.h" #include "nic.h" #include "io.h" #include "farch_regs.h" #include "mcdi_pcol.h" #include "phy.h" /************************************************************************** * * Management-Controller-to-Driver Interface * ************************************************************************** */ #define MCDI_RPC_TIMEOUT (10 * HZ) /* A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot * via these mechanisms then wait 250ms for the status word to be set. 
*/ #define MCDI_STATUS_DELAY_US 100 #define MCDI_STATUS_DELAY_COUNT 2500 #define MCDI_STATUS_SLEEP_MS \ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) #define SEQ_MASK \ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) struct efx_mcdi_async_param { struct list_head list; unsigned int cmd; size_t inlen; size_t outlen; bool quiet; efx_mcdi_async_completer *complete; unsigned long cookie; /* followed by request/response buffer */ }; static void efx_mcdi_timeout_async(unsigned long context); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); static bool efx_mcdi_poll_once(struct efx_nic *efx); static void efx_mcdi_abandon(struct efx_nic *efx); int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; bool already_attached; int rc; efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); if (!efx->mcdi) return -ENOMEM; mcdi = efx_mcdi(efx); mcdi->efx = efx; init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; mcdi->mode = MCDI_MODE_POLL; spin_lock_init(&mcdi->async_lock); INIT_LIST_HEAD(&mcdi->async_list); setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, (unsigned long)mcdi); (void) efx_mcdi_poll_reboot(efx); mcdi->new_epoch = true; /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) return rc; /* Let the MC (and BMC, if this is a LOM) know that the driver * is loaded. We should do this before we reset the NIC. 
*/ rc = efx_mcdi_drv_attach(efx, true, &already_attached); if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); return rc; } if (already_attached) /* Not a fatal error */ netif_err(efx, probe, efx->net_dev, "Host already registered with MCPU\n"); if (efx->mcdi->fn_flags & (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) efx->primary = efx; return 0; } void efx_mcdi_fini(struct efx_nic *efx) { if (!efx->mcdi) return; BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT); /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); kfree(efx->mcdi); } static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); efx_dword_t hdr[2]; size_t hdr_len; u32 xflags, seqno; BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT); /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; spin_unlock_bh(&mcdi->iface_lock); seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; if (efx->type->mcdi_max_ver == 1) { /* MCDI v1 */ EFX_POPULATE_DWORD_7(hdr[0], MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_CODE, cmd, MCDI_HEADER_DATALEN, inlen, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags, MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); hdr_len = 4; } else { /* MCDI v2 */ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2); EFX_POPULATE_DWORD_7(hdr[0], MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_CODE, MC_CMD_V2_EXTN, MCDI_HEADER_DATALEN, 0, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags, MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch); EFX_POPULATE_DWORD_2(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd, MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen); hdr_len = 8; } efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); mcdi->new_epoch = false; } static int efx_mcdi_errno(unsigned int mcdi_err) { switch 
(mcdi_err) { case 0: return 0; #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ return -name; TRANSLATE_ERROR(EPERM); TRANSLATE_ERROR(ENOENT); TRANSLATE_ERROR(EINTR); TRANSLATE_ERROR(EAGAIN); TRANSLATE_ERROR(EACCES); TRANSLATE_ERROR(EBUSY); TRANSLATE_ERROR(EINVAL); TRANSLATE_ERROR(EDEADLK); TRANSLATE_ERROR(ENOSYS); TRANSLATE_ERROR(ETIME); TRANSLATE_ERROR(EALREADY); TRANSLATE_ERROR(ENOSPC); #undef TRANSLATE_ERROR case MC_CMD_ERR_ENOTSUP: return -EOPNOTSUPP; case MC_CMD_ERR_ALLOC_FAIL: return -ENOBUFS; case MC_CMD_ERR_MAC_EXIST: return -EADDRINUSE; default: return -EPROTO; } } static void efx_mcdi_read_response_header(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int respseq, respcmd, error; efx_dword_t hdr; efx->type->mcdi_read_response(efx, &hdr, 0, 4); respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ); respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE); error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR); if (respcmd != MC_CMD_V2_EXTN) { mcdi->resp_hdr_len = 4; mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN); } else { efx->type->mcdi_read_response(efx, &hdr, 4, 4); mcdi->resp_hdr_len = 8; mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { netif_err(efx, hw, efx->net_dev, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", respseq, mcdi->seqno); mcdi->resprc = -EIO; } else if (error) { efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4); mcdi->resprc = efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0)); } else { mcdi->resprc = 0; } } static bool efx_mcdi_poll_once(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); rmb(); if (!efx->type->mcdi_poll_response(efx)) return false; spin_lock_bh(&mcdi->iface_lock); efx_mcdi_read_response_header(efx); spin_unlock_bh(&mcdi->iface_lock); return true; } static int 
efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned long time, finish; unsigned int spins; int rc; /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ rc = efx_mcdi_poll_reboot(efx); if (rc) { spin_lock_bh(&mcdi->iface_lock); mcdi->resprc = rc; mcdi->resp_hdr_len = 0; mcdi->resp_data_len = 0; spin_unlock_bh(&mcdi->iface_lock); return 0; } /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, * because generally mcdi responses are fast. After that, back off * and poll once a jiffy (approximately) */ spins = TICK_USEC; finish = jiffies + MCDI_RPC_TIMEOUT; while (1) { if (spins != 0) { --spins; udelay(1); } else { schedule_timeout_uninterruptible(1); } time = jiffies; if (efx_mcdi_poll_once(efx)) break; if (time_after(time, finish)) return -ETIMEDOUT; } /* Return rc=0 like wait_event_timeout() */ return 0; } /* Test and clear MC-rebooted flag for this port/function; reset * software state as necessary. */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { if (!efx->mcdi) return 0; return efx->type->mcdi_poll_reboot(efx); } static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi) { return cmpxchg(&mcdi->state, MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) == MCDI_STATE_QUIESCENT; } static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi) { /* Wait until the interface becomes QUIESCENT and we win the race * to mark it RUNNING_SYNC. */ wait_event(mcdi->wq, cmpxchg(&mcdi->state, MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) == MCDI_STATE_QUIESCENT); } static int efx_mcdi_await_completion(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, MCDI_RPC_TIMEOUT) == 0) return -ETIMEDOUT; /* Check if efx_mcdi_set_mode() switched us back to polled completions. * In which case, poll for completions directly. 
If efx_mcdi_ev_cpl() * completed the request first, then we'll just end up completing the * request again, which is safe. * * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which * wait_event_timeout() implicitly provides. */ if (mcdi->mode == MCDI_MODE_POLL) return efx_mcdi_poll(efx); return 0; } /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the * requester. Return whether this was done. Does not take any locks. */ static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi) { if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING_SYNC) { wake_up(&mcdi->wq); return true; } return false; } static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) { if (mcdi->mode == MCDI_MODE_EVENTS) { struct efx_mcdi_async_param *async; struct efx_nic *efx = mcdi->efx; /* Process the asynchronous request queue */ spin_lock_bh(&mcdi->async_lock); async = list_first_entry_or_null( &mcdi->async_list, struct efx_mcdi_async_param, list); if (async) { mcdi->state = MCDI_STATE_RUNNING_ASYNC; efx_mcdi_send_request(efx, async->cmd, (const efx_dword_t *)(async + 1), async->inlen); mod_timer(&mcdi->async_timer, jiffies + MCDI_RPC_TIMEOUT); } spin_unlock_bh(&mcdi->async_lock); if (async) return; } mcdi->state = MCDI_STATE_QUIESCENT; wake_up(&mcdi->wq); } /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the * asynchronous completion function, and release the interface. * Return whether this was done. Must be called in bh-disabled * context. Will take iface_lock and async_lock. 
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len, err_len;
	efx_dword_t *outbuf;
	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	/* Dequeue the request that just completed. */
	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	/* The response buffer lives after the efx_mcdi_async_param header. */
	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	if (!timeout && rc && !async->quiet) {
		err_len = min(sizeof(errbuf), data_len);
		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
					      sizeof(errbuf));
		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
				       err_len, rc);
	}
	async->complete(efx, async->cookie, rc, outbuf, data_len);
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

/* Handle an MCDI CMDDONE completion event for the current request. */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

/* Timer callback: the current asynchronous request has timed out. */
static void efx_mcdi_timeout_async(unsigned long context)
{
	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;

	efx_mcdi_complete_async(mcdi, true);
}

/* Validate command number and input length against the MCDI version
 * supported by this NIC type.
 */
static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

/* Wait for the request started by efx_mcdi_rpc_start() to finish, copy
 * out the response and release the interface.
 */
static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
				efx_dword_t *outbuf, size_t outlen,
				size_t *outlen_actual, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);

		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
			netif_err(efx, hw, efx->net_dev,
				  "MCDI request was completed without an event\n");
			rc = 0;
		}

		efx_mcdi_abandon(efx);

		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);
	}

	if (rc != 0) {
		if (outlen_actual)
			*outlen_actual = 0;
	} else {
		size_t hdr_len, data_len, err_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		err_len = min(sizeof(errbuf), data_len);
		spin_unlock_bh(&mcdi->iface_lock);
		BUG_ON(rc > 0);

		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
					      min(outlen, data_len));
		if (outlen_actual)
			*outlen_actual = data_len;

		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);

		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			/* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else if (rc && !quiet) {
			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
					       rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			/* Give the MC time to recover, then re-check the
			 * reboot flag before starting a new epoch.
			 */
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

/* Issue a synchronous MCDI request and wait for its completion. */
static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
			 const efx_dword_t *inbuf, size_t inlen,
			 efx_dword_t *outbuf, size_t outlen,
			 size_t *outlen_actual, bool quiet)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc) {
		if (outlen_actual)
			*outlen_actual = 0;
		return rc;
	}
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, quiet);
}

/* Issue a synchronous MCDI request, logging any MC error. */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
			     outlen_actual, false);
}

/* Normally, on receiving an error code in the MCDI response,
 * efx_mcdi_rpc will log an error message containing (among other
 * things) the raw error code, by means of efx_mcdi_display_error.
 * This _quiet version suppresses that; if the caller wishes to log
 * the error conditionally on the return code, it should call this
 * function and is then responsible for calling efx_mcdi_display_error
 * as needed.
 */
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual)
{
	return _efx_mcdi_rpc(efx, cmd, inbuf, inlen, outbuf, outlen,
			     outlen_actual, true);
}

/* Start a synchronous MCDI request; pair with efx_mcdi_rpc_finish().
 * Fails with -ENETDOWN if the other function is running BIST or the
 * interface is in fail-fast mode.
 */
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	if (mcdi->mode == MCDI_MODE_FAIL)
		return -ENETDOWN;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

/* Queue an asynchronous MCDI request.  May be called in atomic context;
 * the request buffer is copied into the allocation so the caller's
 * buffer need not outlive this call.
 */
static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			       const efx_dword_t *inbuf, size_t inlen,
			       size_t outlen,
			       efx_mcdi_async_completer *complete,
			       unsigned long cookie, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	/* One allocation holds the header plus a buffer big enough for
	 * either the request or the response, dword-aligned.
	 */
	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->quiet = quiet;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		/* Not in event mode: asynchronous completion is impossible. */
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 */
int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		       efx_mcdi_async_completer *complete, unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}

/* As efx_mcdi_rpc_async(), but suppresses error logging on MC failure. */
int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     size_t outlen, efx_mcdi_async_completer *complete,
			     unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, true);
}

/* Complete a request begun with efx_mcdi_rpc_start(), logging errors. */
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, false);
}

/* As efx_mcdi_rpc_finish(), but suppresses error logging on MC failure. */
int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
			      efx_dword_t *outbuf, size_t outlen,
			      size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, true);
}
/* Log an MCDI command failure, including the raw MC error code and
 * argument extracted from the response buffer (when present).
 */
void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
			    size_t inlen, efx_dword_t *outbuf,
			    size_t outlen, int rc)
{
	int code = 0, err_arg = 0;

	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
		code = MCDI_DWORD(outbuf, ERR_CODE);
	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
	netif_err(efx, hw, efx->net_dev,
		  "MC command 0x%x inlen %d failed rc=%d (raw=%d) arg=%d\n",
		  cmd, (int)inlen, rc, code, err_arg);
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;
	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

/* Switch back to event-driven MCDI completions (the reverse of
 * efx_mcdi_mode_poll()).
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in event completion mode, nothing to do.
	 * If in fail-fast state, don't switch to event completion.  FLR
	 * recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

/* Handle an MC assertion-failure or reboot event: fail any outstanding
 * synchronous request with @rc, or schedule a reset if nobody was waiting.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can do always is just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* The MC is going down in to BIST mode.  Set the BIST flag to block
 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
 * (which doesn't actually execute a reset, it waits for the controlling
 * function to reset it).
 */
static void efx_mcdi_ev_bist(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	spin_lock(&mcdi->iface_lock);
	efx->mc_bist_for_other_fn = true;
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = -EIO;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	}
	mcdi->new_epoch = true;
	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
	spin_unlock(&mcdi->iface_lock);
}

/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* xchg makes the first caller win the race to report it. */
	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this.
		 */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_PTP_TIME:
		efx_time_sync_event(channel, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_ef10_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

/* Format the MC firmware version (and, on EF10, the datapath firmware
 * variants) into @buf, which must hold at least @len bytes.
 */
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, max(MC_CMD_GET_VERSION_OUT_LEN,
				     MC_CMD_GET_CAPABILITIES_OUT_LEN));
	size_t outlength;
	const __le16 *ver_words;
	size_t offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;
	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	offset = snprintf(buf, len, "%u.%u.%u.%u",
			  le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
			  le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));

	/* EF10 may have multiple datapath firmware variants within a
	 * single version.  Report which variants are running.
	 */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
		BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
				  outbuf, sizeof(outbuf), &outlength);
		if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
			offset += snprintf(
				buf + offset, len - offset, " rx? tx?");
		else
			offset += snprintf(
				buf + offset, len - offset, " rx%x tx%x",
				MCDI_WORD(outbuf,
					  GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
				MCDI_WORD(outbuf,
					  GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));

		/* It's theoretically possible for the string to exceed 31
		 * characters, though in practice the first three version
		 * components are short enough that this doesn't happen.
		 */
		if (WARN_ON(offset >= len))
			buf[0] = 0;
	}

	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

/* Attach or detach the driver to/from the MC and retrieve the function
 * flags.  *@was_attached is set to the previous attachment state.
 */
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (driver_operating) {
		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
			efx->mcdi->fn_flags =
				MCDI_DWORD(outbuf,
					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
		} else {
			/* Synthesise flags for Siena */
			efx->mcdi->fn_flags =
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
				(efx_port_num(efx) == 0) <<
				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
		}
	}

	/* We currently assume we have control of the external link
	 * and are completely trusted by firmware.
	 * Abort probing
	 * if that's not true for this function.
	 */
	if (driver_operating &&
	    (efx->mcdi->fn_flags &
	     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
	     1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
		netif_err(efx, probe, efx->net_dev,
			  "This driver version only supports one function per port\n");
		return -ENODEV;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Read board configuration: per-port MAC address base, firmware subtype
 * list and capability flags.  Any of the output pointers may be NULL.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
	/* we need __aligned(2) for ether_addr_copy */
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		ether_addr_copy(mac_address,
				port_num ?
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		/* Zero-fill any entries the response did not cover. */
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

/* Direct MC log output to the UART and/or an event queue. */
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	return rc;
}

/* Query the bitmask of NVRAM partition types present on this NIC. */
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

/* Query size, erase granularity and write-protect state of an NVRAM
 * partition.
 */
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;
	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Self-test one NVRAM partition type; PASS and NOTSUPP both count as
 * success.
 */
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

/* Self-test every NVRAM partition type reported by the MC. */
int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	/* Walk the bitmask, testing each type whose bit is set. */
	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/* Read out and log any assertion state recorded by the MC, clearing it
 * in the process.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler.  Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR.  And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

/* Reboot the MC out of its assertion handler. */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
*/ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL); } int efx_mcdi_handle_assertion(struct efx_nic *efx) { int rc; rc = efx_mcdi_read_assertion(efx); if (rc) return rc; efx_mcdi_exit_assertion(efx); return 0; } void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN); int rc; BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); } static int efx_mcdi_reset_func(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN); int rc; BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0); MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG, ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf), NULL, 0, NULL); return rc; } static int efx_mcdi_reset_mc(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN); int rc; BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), NULL, 0, NULL); /* White is black, and up is down */ if (rc == -EIO) return 0; if (rc == 0) rc = -EIO; return rc; } enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason) { return RESET_TYPE_RECOVER_OR_ALL; } int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method) { int rc; /* If MCDI is down, we can't handle_assertion */ if (method == RESET_TYPE_MCDI_TIMEOUT) { rc = pci_reset_function(efx->pci_dev); if (rc) return rc; /* Re-enable polled MCDI completion */ if (efx->mcdi) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); mcdi->mode = MCDI_MODE_POLL; } 
return 0; } /* Recover from a failed assertion pre-reset */ rc = efx_mcdi_handle_assertion(efx); if (rc) return rc; if (method == RESET_TYPE_WORLD) return efx_mcdi_reset_mc(efx); else return efx_mcdi_reset_func(efx); } static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, const u8 *mac, int *id_out) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN); size_t outlen; int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac); rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { rc = -EIO; goto fail; } *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); return 0; fail: *id_out = -1; netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) { return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); } int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) { MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN); size_t outlen; int rc; rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { rc = -EIO; goto fail; } *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); return 0; fail: *id_out = -1; netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), NULL, 0, NULL); return rc; } int 
efx_mcdi_flush_rxqs(struct efx_nic *efx) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS)); int rc, count; BUILD_BUG_ON(EFX_MAX_CHANNELS > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); count = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) { if (rx_queue->flush_pending) { rx_queue->flush_pending = false; atomic_dec(&efx->rxq_flush_pending); MCDI_SET_ARRAY_DWORD( inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, count, efx_rx_queue_index(rx_queue)); count++; } } } rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL); WARN_ON(rc < 0); return rc; } int efx_mcdi_wol_filter_reset(struct efx_nic *efx) { int rc; rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); return rc; } int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), NULL, 0, NULL); } #ifdef CONFIG_SFC_MTD #define EFX_MCDI_NVRAM_LEN_MAX 128 static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), NULL, 0, NULL); return rc; } static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, loff_t offset, u8 *buffer, size_t length) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); size_t outlen; int rc; MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, 
offset); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) return rc; memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); return 0; } static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, loff_t offset, const u8 *buffer, size_t length) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); int rc; MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), NULL, 0, NULL); return rc; } static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset, size_t length) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), NULL, 0, NULL); return rc; } static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), NULL, 0, NULL); return rc; } int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk; int rc = 0; 
while (offset < end) { chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, buffer, chunk); if (rc) goto out; offset += chunk; buffer += chunk; } out: *retlen = offset - start; return rc; } int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start & ~((loff_t)(mtd->erasesize - 1)); loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk = part->common.mtd.erasesize; int rc = 0; if (!part->updating) { rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); if (rc) goto out; part->updating = true; } /* The MCDI interface can in fact do multiple erase blocks at once; * but erasing may be slow, so we make multiple calls here to avoid * tripping the MCDI RPC timeout. */ while (offset < end) { rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, chunk); if (rc) goto out; offset += chunk; } out: return rc; } int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; loff_t offset = start; loff_t end = min_t(loff_t, start + len, mtd->size); size_t chunk; int rc = 0; if (!part->updating) { rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); if (rc) goto out; part->updating = true; } while (offset < end) { chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset, buffer, chunk); if (rc) goto out; offset += chunk; buffer += chunk; } out: *retlen = offset - start; return rc; } int efx_mcdi_mtd_sync(struct mtd_info *mtd) { struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); struct efx_nic *efx = mtd->priv; int rc = 0; if (part->updating) { part->updating = false; rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); } return rc; } 
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part) { struct efx_mcdi_mtd_partition *mcdi_part = container_of(part, struct efx_mcdi_mtd_partition, common); struct efx_nic *efx = part->mtd.priv; snprintf(part->name, sizeof(part->name), "%s %s:%02x", efx->name, part->type_name, mcdi_part->fw_subtype); } #endif /* CONFIG_SFC_MTD */
gpl-2.0
githubacer/acer_linux_kernel
arch/mips/pci/pci-bcm47xx.c
771
2016
/* * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/ssb/ssb.h> int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return 0; } int pcibios_plat_dev_init(struct pci_dev *dev) { int res; u8 slot, pin; res = ssb_pcibios_plat_dev_init(dev); if (res < 0) { printk(KERN_ALERT "PCI: Failed to init device %s\n", pci_name(dev)); return res; } pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); slot = PCI_SLOT(dev->devfn); res = ssb_pcibios_map_irq(dev, slot, pin); /* IRQ-0 and IRQ-1 are software interrupts. */ if (res < 2) { printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n", pci_name(dev)); return res; } dev->irq = res; return 0; }
gpl-2.0
jayk/linux
fs/ocfs2/dlm/dlmdebug.c
1027
27672
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmdebug.c * * debug functionality for the dlm * * Copyright (C) 2004, 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/sysctl.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/export.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #include "dlmdebug.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" static int stringify_lockname(const char *lockname, int locklen, char *buf, int len); void dlm_print_one_lock_resource(struct dlm_lock_resource *res) { spin_lock(&res->spinlock); __dlm_print_one_lock_resource(res); spin_unlock(&res->spinlock); } static void dlm_print_lockres_refmap(struct dlm_lock_resource *res) { int bit; assert_spin_locked(&res->spinlock); printk(" refmap nodes: [ "); bit = 0; while (1) { bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); if (bit >= O2NM_MAX_NODES) break; printk("%u ", bit); bit++; } printk("], inflight=%u\n", res->inflight_locks); } static void __dlm_print_lock(struct dlm_lock *lock) { spin_lock(&lock->spinlock); printk(" type=%d, 
conv=%d, node=%u, cookie=%u:%llu, " "ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), " "pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n", lock->ml.type, lock->ml.convert_type, lock->ml.node, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), atomic_read(&lock->lock_refs.refcount), (list_empty(&lock->ast_list) ? 'y' : 'n'), (lock->ast_pending ? 'y' : 'n'), (list_empty(&lock->bast_list) ? 'y' : 'n'), (lock->bast_pending ? 'y' : 'n'), (lock->convert_pending ? 'y' : 'n'), (lock->lock_pending ? 'y' : 'n'), (lock->cancel_pending ? 'y' : 'n'), (lock->unlock_pending ? 'y' : 'n')); spin_unlock(&lock->spinlock); } void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) { struct dlm_lock *lock; char buf[DLM_LOCKID_NAME_MAX]; assert_spin_locked(&res->spinlock); stringify_lockname(res->lockname.name, res->lockname.len, buf, sizeof(buf)); printk("lockres: %s, owner=%u, state=%u\n", buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", res->last_used, atomic_read(&res->refs.refcount), list_empty(&res->purge) ? "no" : "yes"); printk(" on dirty list: %s, on reco list: %s, " "migrating pending: %s\n", list_empty(&res->dirty) ? "no" : "yes", list_empty(&res->recovering) ? "no" : "yes", res->migration_pending ? 
"yes" : "no"); printk(" inflight locks: %d, asts reserved: %d\n", res->inflight_locks, atomic_read(&res->asts_reserved)); dlm_print_lockres_refmap(res); printk(" granted queue:\n"); list_for_each_entry(lock, &res->granted, list) { __dlm_print_lock(lock); } printk(" converting queue:\n"); list_for_each_entry(lock, &res->converting, list) { __dlm_print_lock(lock); } printk(" blocked queue:\n"); list_for_each_entry(lock, &res->blocked, list) { __dlm_print_lock(lock); } } void dlm_print_one_lock(struct dlm_lock *lockid) { dlm_print_one_lock_resource(lockid->lockres); } EXPORT_SYMBOL_GPL(dlm_print_one_lock); static const char *dlm_errnames[] = { [DLM_NORMAL] = "DLM_NORMAL", [DLM_GRANTED] = "DLM_GRANTED", [DLM_DENIED] = "DLM_DENIED", [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS", [DLM_WORKING] = "DLM_WORKING", [DLM_BLOCKED] = "DLM_BLOCKED", [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN", [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD", [DLM_SYSERR] = "DLM_SYSERR", [DLM_NOSUPPORT] = "DLM_NOSUPPORT", [DLM_CANCELGRANT] = "DLM_CANCELGRANT", [DLM_IVLOCKID] = "DLM_IVLOCKID", [DLM_SYNC] = "DLM_SYNC", [DLM_BADTYPE] = "DLM_BADTYPE", [DLM_BADRESOURCE] = "DLM_BADRESOURCE", [DLM_MAXHANDLES] = "DLM_MAXHANDLES", [DLM_NOCLINFO] = "DLM_NOCLINFO", [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR", [DLM_NOPURGED] = "DLM_NOPURGED", [DLM_BADARGS] = "DLM_BADARGS", [DLM_VOID] = "DLM_VOID", [DLM_NOTQUEUED] = "DLM_NOTQUEUED", [DLM_IVBUFLEN] = "DLM_IVBUFLEN", [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT", [DLM_BADPARAM] = "DLM_BADPARAM", [DLM_VALNOTVALID] = "DLM_VALNOTVALID", [DLM_REJECTED] = "DLM_REJECTED", [DLM_ABORT] = "DLM_ABORT", [DLM_CANCEL] = "DLM_CANCEL", [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE", [DLM_DEADLOCK] = "DLM_DEADLOCK", [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS", [DLM_FORWARD] = "DLM_FORWARD", [DLM_TIMEOUT] = "DLM_TIMEOUT", [DLM_IVGROUPID] = "DLM_IVGROUPID", [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT", [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH", [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION", 
[DLM_NO_CONTROL_DEVICE ] = "DLM_NO_CONTROL_DEVICE ", [DLM_RECOVERING] = "DLM_RECOVERING", [DLM_MIGRATING] = "DLM_MIGRATING", [DLM_MAXSTATS] = "DLM_MAXSTATS", }; static const char *dlm_errmsgs[] = { [DLM_NORMAL] = "request in progress", [DLM_GRANTED] = "request granted", [DLM_DENIED] = "request denied", [DLM_DENIED_NOLOCKS] = "request denied, out of system resources", [DLM_WORKING] = "async request in progress", [DLM_BLOCKED] = "lock request blocked", [DLM_BLOCKED_ORPHAN] = "lock request blocked by a orphan lock", [DLM_DENIED_GRACE_PERIOD] = "topological change in progress", [DLM_SYSERR] = "system error", [DLM_NOSUPPORT] = "unsupported", [DLM_CANCELGRANT] = "can't cancel convert: already granted", [DLM_IVLOCKID] = "bad lockid", [DLM_SYNC] = "synchronous request granted", [DLM_BADTYPE] = "bad resource type", [DLM_BADRESOURCE] = "bad resource handle", [DLM_MAXHANDLES] = "no more resource handles", [DLM_NOCLINFO] = "can't contact cluster manager", [DLM_NOLOCKMGR] = "can't contact lock manager", [DLM_NOPURGED] = "can't contact purge daemon", [DLM_BADARGS] = "bad api args", [DLM_VOID] = "no status", [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed", [DLM_IVBUFLEN] = "invalid resource name length", [DLM_CVTUNGRANT] = "attempted to convert ungranted lock", [DLM_BADPARAM] = "invalid lock mode specified", [DLM_VALNOTVALID] = "value block has been invalidated", [DLM_REJECTED] = "request rejected, unrecognized client", [DLM_ABORT] = "blocked lock request cancelled", [DLM_CANCEL] = "conversion request cancelled", [DLM_IVRESHANDLE] = "invalid resource handle", [DLM_DEADLOCK] = "deadlock recovery refused this request", [DLM_DENIED_NOASTS] = "failed to allocate AST", [DLM_FORWARD] = "request must wait for primary's response", [DLM_TIMEOUT] = "timeout value for lock has expired", [DLM_IVGROUPID] = "invalid group specification", [DLM_VERS_CONFLICT] = "version conflicts prevent request handling", [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong", 
[DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device", [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device ", [DLM_RECOVERING] = "lock resource being recovered", [DLM_MIGRATING] = "lock resource being migrated", [DLM_MAXSTATS] = "invalid error number", }; const char *dlm_errmsg(enum dlm_status err) { if (err >= DLM_MAXSTATS || err < 0) return dlm_errmsgs[DLM_MAXSTATS]; return dlm_errmsgs[err]; } EXPORT_SYMBOL_GPL(dlm_errmsg); const char *dlm_errname(enum dlm_status err) { if (err >= DLM_MAXSTATS || err < 0) return dlm_errnames[DLM_MAXSTATS]; return dlm_errnames[err]; } EXPORT_SYMBOL_GPL(dlm_errname); /* NOTE: This function converts a lockname into a string. It uses knowledge * of the format of the lockname that should be outside the purview of the dlm. * We are adding only to make dlm debugging slightly easier. * * For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h. */ static int stringify_lockname(const char *lockname, int locklen, char *buf, int len) { int out = 0; __be64 inode_blkno_be; #define OCFS2_DENTRY_LOCK_INO_START 18 if (*lockname == 'N') { memcpy((__be64 *)&inode_blkno_be, (char *)&lockname[OCFS2_DENTRY_LOCK_INO_START], sizeof(__be64)); out += snprintf(buf + out, len - out, "%.*s%08x", OCFS2_DENTRY_LOCK_INO_START - 1, lockname, (unsigned int)be64_to_cpu(inode_blkno_be)); } else out += snprintf(buf + out, len - out, "%.*s", locklen, lockname); return out; } static int stringify_nodemap(unsigned long *nodemap, int maxnodes, char *buf, int len) { int out = 0; int i = -1; while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes) out += snprintf(buf + out, len - out, "%d ", i); return out; } static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len) { int out = 0; char *mle_type; if (mle->type == DLM_MLE_BLOCK) mle_type = "BLK"; else if (mle->type == DLM_MLE_MASTER) mle_type = "MAS"; else mle_type = "MIG"; out += stringify_lockname(mle->mname, mle->mnamelen, buf + out, len - out); 
out += snprintf(buf + out, len - out, "\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n", mle_type, mle->master, mle->new_master, !list_empty(&mle->hb_events), !!mle->inuse, atomic_read(&mle->mle_refs.refcount)); out += snprintf(buf + out, len - out, "Maybe="); out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); out += snprintf(buf + out, len - out, "Vote="); out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); out += snprintf(buf + out, len - out, "Response="); out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); out += snprintf(buf + out, len - out, "Node="); out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); out += snprintf(buf + out, len - out, "\n"); return out; } void dlm_print_one_mle(struct dlm_master_list_entry *mle) { char *buf; buf = (char *) get_zeroed_page(GFP_NOFS); if (buf) { dump_mle(mle, buf, PAGE_SIZE - 1); free_page((unsigned long)buf); } } #ifdef CONFIG_DEBUG_FS static struct dentry *dlm_debugfs_root; #define DLM_DEBUGFS_DIR "o2dlm" #define DLM_DEBUGFS_DLM_STATE "dlm_state" #define DLM_DEBUGFS_LOCKING_STATE "locking_state" #define DLM_DEBUGFS_MLE_STATE "mle_state" #define DLM_DEBUGFS_PURGE_LIST "purge_list" /* begin - utils funcs */ static void dlm_debug_free(struct kref *kref) { struct dlm_debug_ctxt *dc; dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt); kfree(dc); } static void dlm_debug_put(struct dlm_debug_ctxt *dc) { if (dc) kref_put(&dc->debug_refcnt, dlm_debug_free); } static void dlm_debug_get(struct dlm_debug_ctxt *dc) { kref_get(&dc->debug_refcnt); } static int debug_release(struct inode *inode, struct file *file) { free_page((unsigned long)file->private_data); return 0; } static ssize_t debug_read(struct file *file, char __user 
*buf, size_t nbytes, loff_t *ppos) { return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, i_size_read(file->f_mapping->host)); } /* end - util funcs */ /* begin - purge list funcs */ static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len) { struct dlm_lock_resource *res; int out = 0; unsigned long total = 0; out += snprintf(buf + out, len - out, "Dumping Purgelist for Domain: %s\n", dlm->name); spin_lock(&dlm->spinlock); list_for_each_entry(res, &dlm->purge_list, purge) { ++total; if (len - out < 100) continue; spin_lock(&res->spinlock); out += stringify_lockname(res->lockname.name, res->lockname.len, buf + out, len - out); out += snprintf(buf + out, len - out, "\t%ld\n", (jiffies - res->last_used)/HZ); spin_unlock(&res->spinlock); } spin_unlock(&dlm->spinlock); out += snprintf(buf + out, len - out, "Total on list: %lu\n", total); return out; } static int debug_purgelist_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; char *buf = NULL; buf = (char *) get_zeroed_page(GFP_NOFS); if (!buf) goto bail; i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1)); file->private_data = buf; return 0; bail: return -ENOMEM; } static const struct file_operations debug_purgelist_fops = { .open = debug_purgelist_open, .release = debug_release, .read = debug_read, .llseek = generic_file_llseek, }; /* end - purge list funcs */ /* begin - debug mle funcs */ static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len) { struct dlm_master_list_entry *mle; struct hlist_head *bucket; int i, out = 0; unsigned long total = 0, longest = 0, bucket_count = 0; out += snprintf(buf + out, len - out, "Dumping MLEs for Domain: %s\n", dlm->name); spin_lock(&dlm->master_lock); for (i = 0; i < DLM_HASH_BUCKETS; i++) { bucket = dlm_master_hash(dlm, i); hlist_for_each_entry(mle, bucket, master_hash_node) { ++total; ++bucket_count; if (len - out < 200) continue; out += dump_mle(mle, buf + out, len - out); } 
longest = max(longest, bucket_count); bucket_count = 0; } spin_unlock(&dlm->master_lock); out += snprintf(buf + out, len - out, "Total: %lu, Longest: %lu\n", total, longest); return out; } static int debug_mle_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; char *buf = NULL; buf = (char *) get_zeroed_page(GFP_NOFS); if (!buf) goto bail; i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1)); file->private_data = buf; return 0; bail: return -ENOMEM; } static const struct file_operations debug_mle_fops = { .open = debug_mle_open, .release = debug_release, .read = debug_read, .llseek = generic_file_llseek, }; /* end - debug mle funcs */ /* begin - debug lockres funcs */ static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len) { int out; #define DEBUG_LOCK_VERSION 1 spin_lock(&lock->spinlock); out = snprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d," "%d,%d,%d,%d\n", DEBUG_LOCK_VERSION, list_type, lock->ml.type, lock->ml.convert_type, lock->ml.node, dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), !list_empty(&lock->ast_list), !list_empty(&lock->bast_list), lock->ast_pending, lock->bast_pending, lock->convert_pending, lock->lock_pending, lock->cancel_pending, lock->unlock_pending, atomic_read(&lock->lock_refs.refcount)); spin_unlock(&lock->spinlock); return out; } static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len) { struct dlm_lock *lock; int i; int out = 0; out += snprintf(buf + out, len - out, "NAME:"); out += stringify_lockname(res->lockname.name, res->lockname.len, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); #define DEBUG_LRES_VERSION 1 out += snprintf(buf + out, len - out, "LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n", DEBUG_LRES_VERSION, res->owner, res->state, res->last_used, !list_empty(&res->purge), !list_empty(&res->dirty), !list_empty(&res->recovering), 
res->inflight_locks, res->migration_pending, atomic_read(&res->asts_reserved), atomic_read(&res->refs.refcount)); /* refmap */ out += snprintf(buf + out, len - out, "RMAP:"); out += stringify_nodemap(res->refmap, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); /* lvb */ out += snprintf(buf + out, len - out, "LVBX:"); for (i = 0; i < DLM_LVB_LEN; i++) out += snprintf(buf + out, len - out, "%02x", (unsigned char)res->lvb[i]); out += snprintf(buf + out, len - out, "\n"); /* granted */ list_for_each_entry(lock, &res->granted, list) out += dump_lock(lock, 0, buf + out, len - out); /* converting */ list_for_each_entry(lock, &res->converting, list) out += dump_lock(lock, 1, buf + out, len - out); /* blocked */ list_for_each_entry(lock, &res->blocked, list) out += dump_lock(lock, 2, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); return out; } static void *lockres_seq_start(struct seq_file *m, loff_t *pos) { struct debug_lockres *dl = m->private; struct dlm_ctxt *dlm = dl->dl_ctxt; struct dlm_lock_resource *oldres = dl->dl_res; struct dlm_lock_resource *res = NULL; struct list_head *track_list; spin_lock(&dlm->track_lock); if (oldres) track_list = &oldres->tracking; else { track_list = &dlm->tracking_list; if (list_empty(track_list)) { dl = NULL; spin_unlock(&dlm->track_lock); goto bail; } } list_for_each_entry(res, track_list, tracking) { if (&res->tracking == &dlm->tracking_list) res = NULL; else dlm_lockres_get(res); break; } spin_unlock(&dlm->track_lock); if (oldres) dlm_lockres_put(oldres); dl->dl_res = res; if (res) { spin_lock(&res->spinlock); dump_lockres(res, dl->dl_buf, dl->dl_len - 1); spin_unlock(&res->spinlock); } else dl = NULL; bail: /* passed to seq_show */ return dl; } static void lockres_seq_stop(struct seq_file *m, void *v) { } static void *lockres_seq_next(struct seq_file *m, void *v, loff_t *pos) { return NULL; } static int lockres_seq_show(struct seq_file *s, void *v) { struct debug_lockres *dl = 
(struct debug_lockres *)v; seq_printf(s, "%s", dl->dl_buf); return 0; } static const struct seq_operations debug_lockres_ops = { .start = lockres_seq_start, .stop = lockres_seq_stop, .next = lockres_seq_next, .show = lockres_seq_show, }; static int debug_lockres_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; struct debug_lockres *dl; void *buf; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) goto bail; dl = __seq_open_private(file, &debug_lockres_ops, sizeof(*dl)); if (!dl) goto bailfree; dl->dl_len = PAGE_SIZE; dl->dl_buf = buf; dlm_grab(dlm); dl->dl_ctxt = dlm; return 0; bailfree: kfree(buf); bail: mlog_errno(-ENOMEM); return -ENOMEM; } static int debug_lockres_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; struct debug_lockres *dl = (struct debug_lockres *)seq->private; if (dl->dl_res) dlm_lockres_put(dl->dl_res); dlm_put(dl->dl_ctxt); kfree(dl->dl_buf); return seq_release_private(inode, file); } static const struct file_operations debug_lockres_fops = { .open = debug_lockres_open, .release = debug_lockres_release, .read = seq_read, .llseek = seq_lseek, }; /* end - debug lockres funcs */ /* begin - debug state funcs */ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) { int out = 0; struct dlm_reco_node_data *node; char *state; int cur_mles = 0, tot_mles = 0; int i; spin_lock(&dlm->spinlock); switch (dlm->dlm_state) { case DLM_CTXT_NEW: state = "NEW"; break; case DLM_CTXT_JOINED: state = "JOINED"; break; case DLM_CTXT_IN_SHUTDOWN: state = "SHUTDOWN"; break; case DLM_CTXT_LEAVING: state = "LEAVING"; break; default: state = "UNKNOWN"; break; } /* Domain: xxxxxxxxxx Key: 0xdfbac769 */ out += snprintf(buf + out, len - out, "Domain: %s Key: 0x%08x Protocol: %d.%d\n", dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor); /* Thread Pid: xxx Node: xxx State: xxxxx */ out += snprintf(buf + out, len - out, "Thread Pid: %d Node: %d 
State: %s\n", task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state); /* Number of Joins: xxx Joining Node: xxx */ out += snprintf(buf + out, len - out, "Number of Joins: %d Joining Node: %d\n", dlm->num_joins, dlm->joining_node); /* Domain Map: xx xx xx */ out += snprintf(buf + out, len - out, "Domain Map: "); out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); /* Exit Domain Map: xx xx xx */ out += snprintf(buf + out, len - out, "Exit Domain Map: "); out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); /* Live Map: xx xx xx */ out += snprintf(buf + out, len - out, "Live Map: "); out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, buf + out, len - out); out += snprintf(buf + out, len - out, "\n"); /* Lock Resources: xxx (xxx) */ out += snprintf(buf + out, len - out, "Lock Resources: %d (%d)\n", atomic_read(&dlm->res_cur_count), atomic_read(&dlm->res_tot_count)); for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) tot_mles += atomic_read(&dlm->mle_tot_count[i]); for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) cur_mles += atomic_read(&dlm->mle_cur_count[i]); /* MLEs: xxx (xxx) */ out += snprintf(buf + out, len - out, "MLEs: %d (%d)\n", cur_mles, tot_mles); /* Blocking: xxx (xxx) */ out += snprintf(buf + out, len - out, " Blocking: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]), atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK])); /* Mastery: xxx (xxx) */ out += snprintf(buf + out, len - out, " Mastery: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]), atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER])); /* Migration: xxx (xxx) */ out += snprintf(buf + out, len - out, " Migration: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]), atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION])); /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... 
*/
	/* Lists: Dirty=xxx Purge=xxx PendingASTs=xxx PendingBASTs=xxx */
	out += snprintf(buf + out, len - out,
			"Lists: Dirty=%s Purge=%s PendingASTs=%s "
			"PendingBASTs=%s\n",
			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
			(list_empty(&dlm->purge_list) ? "Empty" : "InUse"),
			(list_empty(&dlm->pending_asts) ? "Empty" : "InUse"),
			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));

	/* Purge Count: xxx Refs: xxx */
	out += snprintf(buf + out, len - out,
			"Purge Count: %d Refs: %d\n", dlm->purge_count,
			atomic_read(&dlm->dlm_refs.refcount));

	/* Dead Node: xxx */
	out += snprintf(buf + out, len - out,
			"Dead Node: %d\n", dlm->reco.dead_node);

	/* What about DLM_RECO_STATE_FINALIZE? */
	if (dlm->reco.state == DLM_RECO_STATE_ACTIVE)
		state = "ACTIVE";
	else
		state = "INACTIVE";

	/* Recovery Pid: xxxx Master: xxx State: xxxx */
	out += snprintf(buf + out, len - out,
			"Recovery Pid: %d Master: %d State: %s\n",
			task_pid_nr(dlm->dlm_reco_thread_task),
			dlm->reco.new_master, state);

	/* Recovery Map: xx xx */
	out += snprintf(buf + out, len - out, "Recovery Map: ");
	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
				 buf + out, len - out);
	out += snprintf(buf + out, len - out, "\n");

	/* Recovery Node State: one line per node on the recovery list */
	out += snprintf(buf + out, len - out, "Recovery Node State:\n");
	list_for_each_entry(node, &dlm->reco.node_data, list) {
		switch (node->state) {
		case DLM_RECO_NODE_DATA_INIT:
			state = "INIT";
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			state = "REQUESTING";
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			state = "DEAD";
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			state = "RECEIVING";
			break;
		case DLM_RECO_NODE_DATA_REQUESTED:
			state = "REQUESTED";
			break;
		case DLM_RECO_NODE_DATA_DONE:
			state = "DONE";
			break;
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			state = "FINALIZE-SENT";
			break;
		default:
			state = "BAD";
			break;
		}
		out += snprintf(buf + out, len - out, "\t%u - %s\n",
				node->node_num, state);
	}

	spin_unlock(&dlm->spinlock);

	return out;
}

/*
 * Render a snapshot of the domain state into a zeroed page and attach it
 * to the open file; debug_read() then serves it back to userspace.
 * The page is released by debug_release().
 */
static int debug_state_open(struct inode *inode, struct file *file)
{
	struct dlm_ctxt *dlm = inode->i_private;
	char *buf = NULL;

	buf = (char *) get_zeroed_page(GFP_NOFS);
	if (!buf)
		goto bail;

	/* reserve one byte so the buffer stays NUL-terminated */
	i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));

	file->private_data = buf;

	return 0;
bail:
	return -ENOMEM;
}

static const struct file_operations debug_state_fops = {
	.open =		debug_state_open,
	.release =	debug_release,
	.read =		debug_read,
	.llseek =	generic_file_llseek,
};
/* end - debug state funcs */

/* files in subroot */
/*
 * Create the per-domain debugfs files under dlm->dlm_debugfs_subroot.
 * On any failure, dlm_debug_shutdown() removes whatever was created so
 * far.  Takes a reference on the debug context via dlm_debug_get().
 */
int dlm_debug_init(struct dlm_ctxt *dlm)
{
	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;

	/* for dumping dlm_ctxt */
	dc->debug_state_dentry = debugfs_create_file(DLM_DEBUGFS_DLM_STATE,
						     S_IFREG|S_IRUSR,
						     dlm->dlm_debugfs_subroot,
						     dlm, &debug_state_fops);
	if (!dc->debug_state_dentry) {
		mlog_errno(-ENOMEM);
		goto bail;
	}

	/* for dumping lockres */
	dc->debug_lockres_dentry =
			debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE,
					    S_IFREG|S_IRUSR,
					    dlm->dlm_debugfs_subroot,
					    dlm, &debug_lockres_fops);
	if (!dc->debug_lockres_dentry) {
		mlog_errno(-ENOMEM);
		goto bail;
	}

	/* for dumping mles */
	dc->debug_mle_dentry = debugfs_create_file(DLM_DEBUGFS_MLE_STATE,
						   S_IFREG|S_IRUSR,
						   dlm->dlm_debugfs_subroot,
						   dlm, &debug_mle_fops);
	if (!dc->debug_mle_dentry) {
		mlog_errno(-ENOMEM);
		goto bail;
	}

	/* for dumping lockres on the purge list */
	dc->debug_purgelist_dentry =
			debugfs_create_file(DLM_DEBUGFS_PURGE_LIST,
					    S_IFREG|S_IRUSR,
					    dlm->dlm_debugfs_subroot,
					    dlm, &debug_purgelist_fops);
	if (!dc->debug_purgelist_dentry) {
		mlog_errno(-ENOMEM);
		goto bail;
	}

	dlm_debug_get(dc);
	return 0;

bail:
	dlm_debug_shutdown(dlm);
	return -ENOMEM;
}

/*
 * Remove the per-domain debugfs files (reverse of creation order) and
 * drop the reference taken by dlm_debug_init().  Safe to call with a
 * partially-initialized context (NULL dentries are ignored by
 * debugfs_remove).
 */
void dlm_debug_shutdown(struct dlm_ctxt *dlm)
{
	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;

	if (dc) {
		debugfs_remove(dc->debug_purgelist_dentry);
		debugfs_remove(dc->debug_mle_dentry);
		debugfs_remove(dc->debug_lockres_dentry);
		debugfs_remove(dc->debug_state_dentry);
		dlm_debug_put(dc);
	}
}

/* subroot - domain dir */
/*
 * Create the per-domain debugfs directory and allocate its debug
 * context.  Cleans up after itself on failure.
 */
int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
	dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
						      dlm_debugfs_root);
	if (!dlm->dlm_debugfs_subroot) {
		mlog_errno(-ENOMEM);
		goto bail;
	}

	dlm->dlm_debug_ctxt = kzalloc(sizeof(struct dlm_debug_ctxt),
				      GFP_KERNEL);
	if (!dlm->dlm_debug_ctxt) {
		mlog_errno(-ENOMEM);
		goto bail;
	}
	kref_init(&dlm->dlm_debug_ctxt->debug_refcnt);

	return 0;
bail:
	dlm_destroy_debugfs_subroot(dlm);
	return -ENOMEM;
}

void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
	debugfs_remove(dlm->dlm_debugfs_subroot);
}

/* debugfs root */
/* Create the top-level o2dlm debugfs directory shared by all domains. */
int dlm_create_debugfs_root(void)
{
	dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL);
	if (!dlm_debugfs_root) {
		mlog_errno(-ENOMEM);
		return -ENOMEM;
	}
	return 0;
}

void dlm_destroy_debugfs_root(void)
{
	debugfs_remove(dlm_debugfs_root);
}
#endif /* CONFIG_DEBUG_FS */
gpl-2.0
Menpiko/Google-7.0-Oficial-Kcal-6p
drivers/usb/serial/siemens_mpi.c
2307
1241
/* * Siemens USB-MPI Serial USB driver * * Copyright (C) 2005 Thomas Hergenhahn <thomas.hergenhahn@suse.de> * Copyright (C) 2005,2008 Greg Kroah-Hartman <gregkh@suse.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_AUTHOR "Thomas Hergenhahn@web.de http://libnodave.sf.net" #define DRIVER_DESC "Driver for Siemens USB/MPI adapter" static const struct usb_device_id id_table[] = { /* Vendor and product id for 6ES7-972-0CB20-0XA0 */ { USB_DEVICE(0x908, 0x0004) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver siemens_usb_mpi_device = { .driver = { .owner = THIS_MODULE, .name = "siemens_mpi", }, .id_table = id_table, .num_ports = 1, }; static struct usb_serial_driver * const serial_drivers[] = { &siemens_usb_mpi_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
ryrzy/p75xx_ICS
arch/arm/mach-kirkwood/rd88f6281-setup.c
2819
2950
/* * arch/arm/mach-kirkwood/rd88f6281-setup.c * * Marvell RD-88F6281 Reference Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/mtd/partitions.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> #include <plat/mvsdio.h> #include "common.h" #include "mpp.h" static struct mtd_partition rd88f6281_nand_parts[] = { { .name = "u-boot", .offset = 0, .size = SZ_1M }, { .name = "uImage", .offset = MTDPART_OFS_NXTBLK, .size = SZ_2M }, { .name = "root", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL }, }; static struct mv643xx_eth_platform_data rd88f6281_ge00_data = { .phy_addr = MV643XX_ETH_PHY_NONE, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; static struct dsa_chip_data rd88f6281_switch_chip_data = { .port_names[0] = "lan1", .port_names[1] = "lan2", .port_names[2] = "lan3", .port_names[3] = "lan4", .port_names[5] = "cpu", }; static struct dsa_platform_data rd88f6281_switch_plat_data = { .nr_chips = 1, .chip = &rd88f6281_switch_chip_data, }; static struct mv643xx_eth_platform_data rd88f6281_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(11), }; static struct mv_sata_platform_data rd88f6281_sata_data = { .n_ports = 2, }; static struct mvsdio_platform_data rd88f6281_mvsdio_data = { .gpio_card_detect = 28, }; static unsigned int rd88f6281_mpp_config[] __initdata = { MPP28_GPIO, 0 }; static void __init rd88f6281_init(void) { u32 dev, rev; /* * Basic setup. Needs to be called early. 
*/ kirkwood_init(); kirkwood_mpp_conf(rd88f6281_mpp_config); kirkwood_nand_init(ARRAY_AND_SIZE(rd88f6281_nand_parts), 25); kirkwood_ehci_init(); kirkwood_ge00_init(&rd88f6281_ge00_data); kirkwood_pcie_id(&dev, &rev); if (rev == MV88F6281_REV_A0) { rd88f6281_switch_chip_data.sw_addr = 10; kirkwood_ge01_init(&rd88f6281_ge01_data); } else { rd88f6281_switch_chip_data.port_names[4] = "wan"; } kirkwood_ge00_switch_init(&rd88f6281_switch_plat_data, NO_IRQ); kirkwood_sata_init(&rd88f6281_sata_data); kirkwood_sdio_init(&rd88f6281_mvsdio_data); kirkwood_uart0_init(); } static int __init rd88f6281_pci_init(void) { if (machine_is_rd88f6281()) kirkwood_pcie_init(KW_PCIE0); return 0; } subsys_initcall(rd88f6281_pci_init); MACHINE_START(RD88F6281, "Marvell RD-88F6281 Reference Board") /* Maintainer: Saeed Bishara <saeed@marvell.com> */ .boot_params = 0x00000100, .init_machine = rd88f6281_init, .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, .init_irq = kirkwood_init_irq, .timer = &kirkwood_timer, MACHINE_END
gpl-2.0
robacklin/uclinux-linux
arch/arm/mach-ixp4xx/ixdp425-pci.c
5379
1914
/* * arch/arm/mach-ixp4xx/ixdp425-pci.c * * IXDP425 board-level PCI initialization * * Copyright (C) 2002 Intel Corporation. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/delay.h> #include <asm/mach/pci.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/mach-types.h> #define MAX_DEV 4 #define IRQ_LINES 4 /* PCI controller GPIO to IRQ pin mappings */ #define INTA 11 #define INTB 10 #define INTC 9 #define INTD 8 void __init ixdp425_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init ixdp425_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static int pci_irq_table[IRQ_LINES] = { IXP4XX_GPIO_IRQ(INTA), IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC), IXP4XX_GPIO_IRQ(INTD) }; if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES) return pci_irq_table[(slot + pin - 2) % 4]; return -1; } struct hw_pci ixdp425_pci __initdata = { .nr_controllers = 1, .preinit = ixdp425_pci_preinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = ixdp425_map_irq, }; int __init ixdp425_pci_init(void) { if (machine_is_ixdp425() || machine_is_ixcdp1100() || machine_is_ixdp465() || machine_is_kixrp435()) pci_common_init(&ixdp425_pci); return 0; } subsys_initcall(ixdp425_pci_init);
gpl-2.0
chillwater/Padfone-A66-Jelly-Bean
drivers/net/phy/cicada.c
8195
4138
/*
 * drivers/net/phy/cicada.c
 *
 * Driver for Cicada PHYs
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* Cicada Extended Control Register 1 */
#define MII_CIS8201_EXT_CON1           0x17
#define MII_CIS8201_EXTCON1_INIT       0x0000

/* Cicada Interrupt Mask Register */
#define MII_CIS8201_IMASK		0x19
#define MII_CIS8201_IMASK_IEN		0x8000
#define MII_CIS8201_IMASK_SPEED	0x4000
#define MII_CIS8201_IMASK_LINK		0x2000
#define MII_CIS8201_IMASK_DUPLEX	0x1000
#define MII_CIS8201_IMASK_MASK		0xf000

/* Cicada Interrupt Status Register */
#define MII_CIS8201_ISTAT		0x1a
#define MII_CIS8201_ISTAT_STATUS	0x8000
#define MII_CIS8201_ISTAT_SPEED	0x4000
#define MII_CIS8201_ISTAT_LINK		0x2000
#define MII_CIS8201_ISTAT_DUPLEX	0x1000

/* Cicada Auxiliary Control/Status Register */
#define MII_CIS8201_AUX_CONSTAT        0x1c
#define MII_CIS8201_AUXCONSTAT_INIT    0x0004
#define MII_CIS8201_AUXCONSTAT_DUPLEX  0x0020
#define MII_CIS8201_AUXCONSTAT_SPEED   0x0018
#define MII_CIS8201_AUXCONSTAT_GBIT    0x0010
#define MII_CIS8201_AUXCONSTAT_100     0x0008

MODULE_DESCRIPTION("Cicadia PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");

/*
 * Program the aux control/status and extended control registers to
 * their initial values.  Shared by both supported chips.
 */
static int cis820x_config_init(struct phy_device *phydev)
{
	int err;

	err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
			MII_CIS8201_AUXCONSTAT_INIT);

	if (err < 0)
		return err;

	err = phy_write(phydev, MII_CIS8201_EXT_CON1,
			MII_CIS8201_EXTCON1_INIT);

	return err;
}

/* Reading the interrupt status register clears pending interrupts. */
static int cis820x_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_CIS8201_ISTAT);

	return (err < 0) ? err : 0;
}

/* Enable or mask all PHY interrupt sources in one write. */
static int cis820x_config_intr(struct phy_device *phydev)
{
	int err;

	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
		err = phy_write(phydev, MII_CIS8201_IMASK,
				MII_CIS8201_IMASK_MASK);
	else
		err = phy_write(phydev, MII_CIS8201_IMASK, 0);

	return err;
}

/* Cicada 8201, a.k.a Vitesse VSC8201 */
static struct phy_driver cis8201_driver = {
	.phy_id		= 0x000fc410,
	.name		= "Cicada Cis8201",
	.phy_id_mask	= 0x000ffff0,
	.features	= PHY_GBIT_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_init	= &cis820x_config_init,
	.config_aneg	= &genphy_config_aneg,
	.read_status	= &genphy_read_status,
	.ack_interrupt	= &cis820x_ack_interrupt,
	.config_intr	= &cis820x_config_intr,
	.driver		= { .owner = THIS_MODULE,},
};

/* Cicada 8204 */
static struct phy_driver cis8204_driver = {
	.phy_id		= 0x000fc440,
	.name		= "Cicada Cis8204",
	.phy_id_mask	= 0x000fffc0,
	.features	= PHY_GBIT_FEATURES,
	.flags		= PHY_HAS_INTERRUPT,
	.config_init	= &cis820x_config_init,
	.config_aneg	= &genphy_config_aneg,
	.read_status	= &genphy_read_status,
	.ack_interrupt	= &cis820x_ack_interrupt,
	.config_intr	= &cis820x_config_intr,
	.driver		= { .owner = THIS_MODULE,},
};

/* Register both drivers; unwind the first if the second fails. */
static int __init cicada_init(void)
{
	int ret;

	ret = phy_driver_register(&cis8204_driver);
	if (ret)
		goto err1;

	ret = phy_driver_register(&cis8201_driver);
	if (ret)
		goto err2;
	return 0;

err2:
	phy_driver_unregister(&cis8204_driver);
err1:
	return ret;
}

static void __exit cicada_exit(void)
{
	phy_driver_unregister(&cis8204_driver);
	phy_driver_unregister(&cis8201_driver);
}

module_init(cicada_init);
module_exit(cicada_exit);

static struct mdio_device_id __maybe_unused cicada_tbl[] = {
	{ 0x000fc410, 0x000ffff0 },
	{ 0x000fc440, 0x000fffc0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, cicada_tbl);
gpl-2.0
flar2/bulletproof-m7-5.0
drivers/tty/ipwireless/hardware.c
8195
46474
/* * IPWireless 3G PCMCIA Network Driver * * Original code * by Stephen Blackheath <stephen@blacksapphire.com>, * Ben Martel <benm@symmetric.co.nz> * * Copyrighted as follows: * Copyright (C) 2004 by Symmetric Systems Ltd (NZ) * * Various driver changes and rewrites, port to new kernels * Copyright (C) 2006-2007 Jiri Kosina * * Misc code cleanups and updates * Copyright (C) 2007 David Sterba */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/slab.h> #include "hardware.h" #include "setup_protocol.h" #include "network.h" #include "main.h" static void ipw_send_setup_packet(struct ipw_hardware *hw); static void handle_received_SETUP_packet(struct ipw_hardware *ipw, unsigned int address, const unsigned char *data, int len, int is_last); static void ipwireless_setup_timer(unsigned long data); static void handle_received_CTRL_packet(struct ipw_hardware *hw, unsigned int channel_idx, const unsigned char *data, int len); /*#define TIMING_DIAGNOSTICS*/ #ifdef TIMING_DIAGNOSTICS static struct timing_stats { unsigned long last_report_time; unsigned long read_time; unsigned long write_time; unsigned long read_bytes; unsigned long write_bytes; unsigned long start_time; }; static void start_timing(void) { timing_stats.start_time = jiffies; } static void end_read_timing(unsigned length) { timing_stats.read_time += (jiffies - start_time); timing_stats.read_bytes += length + 2; report_timing(); } static void end_write_timing(unsigned length) { timing_stats.write_time += (jiffies - start_time); timing_stats.write_bytes += length + 2; report_timing(); } static void report_timing(void) { unsigned long since = jiffies - timing_stats.last_report_time; /* If it's been more than one second... 
*/ if (since >= HZ) { int first = (timing_stats.last_report_time == 0); timing_stats.last_report_time = jiffies; if (!first) printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": %u us elapsed - read %lu bytes in %u us, wrote %lu bytes in %u us\n", jiffies_to_usecs(since), timing_stats.read_bytes, jiffies_to_usecs(timing_stats.read_time), timing_stats.write_bytes, jiffies_to_usecs(timing_stats.write_time)); timing_stats.read_time = 0; timing_stats.write_time = 0; timing_stats.read_bytes = 0; timing_stats.write_bytes = 0; } } #else static void start_timing(void) { } static void end_read_timing(unsigned length) { } static void end_write_timing(unsigned length) { } #endif /* Imported IPW definitions */ #define LL_MTU_V1 318 #define LL_MTU_V2 250 #define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2) #define PRIO_DATA 2 #define PRIO_CTRL 1 #define PRIO_SETUP 0 /* Addresses */ #define ADDR_SETUP_PROT 0 /* Protocol ids */ enum { /* Identifier for the Com Data protocol */ TL_PROTOCOLID_COM_DATA = 0, /* Identifier for the Com Control protocol */ TL_PROTOCOLID_COM_CTRL = 1, /* Identifier for the Setup protocol */ TL_PROTOCOLID_SETUP = 2 }; /* Number of bytes in NL packet header (cannot do * sizeof(nl_packet_header) since it's a bitfield) */ #define NL_FIRST_PACKET_HEADER_SIZE 3 /* Number of bytes in NL packet header (cannot do * sizeof(nl_packet_header) since it's a bitfield) */ #define NL_FOLLOWING_PACKET_HEADER_SIZE 1 struct nl_first_packet_header { unsigned char protocol:3; unsigned char address:3; unsigned char packet_rank:2; unsigned char length_lsb; unsigned char length_msb; }; struct nl_packet_header { unsigned char protocol:3; unsigned char address:3; unsigned char packet_rank:2; }; /* Value of 'packet_rank' above */ #define NL_INTERMEDIATE_PACKET 0x0 #define NL_LAST_PACKET 0x1 #define NL_FIRST_PACKET 0x2 union nl_packet { /* Network packet header of the first packet (a special case) */ struct nl_first_packet_header hdr_first; /* Network packet header of the 
following packets (if any) */ struct nl_packet_header hdr; /* Complete network packet (header + data) */ unsigned char rawpkt[LL_MTU_MAX]; } __attribute__ ((__packed__)); #define HW_VERSION_UNKNOWN -1 #define HW_VERSION_1 1 #define HW_VERSION_2 2 /* IPW I/O ports */ #define IOIER 0x00 /* Interrupt Enable Register */ #define IOIR 0x02 /* Interrupt Source/ACK register */ #define IODCR 0x04 /* Data Control Register */ #define IODRR 0x06 /* Data Read Register */ #define IODWR 0x08 /* Data Write Register */ #define IOESR 0x0A /* Embedded Driver Status Register */ #define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */ #define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */ /* I/O ports and bit definitions for version 1 of the hardware */ /* IER bits*/ #define IER_RXENABLED 0x1 #define IER_TXENABLED 0x2 /* ISR bits */ #define IR_RXINTR 0x1 #define IR_TXINTR 0x2 /* DCR bits */ #define DCR_RXDONE 0x1 #define DCR_TXDONE 0x2 #define DCR_RXRESET 0x4 #define DCR_TXRESET 0x8 /* I/O ports and bit definitions for version 2 of the hardware */ struct MEMCCR { unsigned short reg_config_option; /* PCCOR: Configuration Option Register */ unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */ unsigned short reg_pin_replacement; /* PCPRR: Pin Replacemant Register */ unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */ unsigned short reg_ext_status; /* PCESR: Extendend Status Register */ unsigned short reg_io_base; /* PCIOB: I/O Base Register */ }; struct MEMINFREG { unsigned short memreg_tx_old; /* TX Register (R/W) */ unsigned short pad1; unsigned short memreg_rx_done; /* RXDone Register (R/W) */ unsigned short pad2; unsigned short memreg_rx; /* RX Register (R/W) */ unsigned short pad3; unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */ unsigned short pad4; unsigned long memreg_card_present;/* Mask for Host to check (R) for * CARD_PRESENT_VALUE */ unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) 
*/ }; #define CARD_PRESENT_VALUE (0xBEEFCAFEUL) #define MEMTX_TX 0x0001 #define MEMRX_RX 0x0001 #define MEMRX_RX_DONE 0x0001 #define MEMRX_PCINTACKK 0x0001 #define NL_NUM_OF_PRIORITIES 3 #define NL_NUM_OF_PROTOCOLS 3 #define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS struct ipw_hardware { unsigned int base_port; short hw_version; unsigned short ll_mtu; spinlock_t lock; int initializing; int init_loops; struct timer_list setup_timer; /* Flag if hw is ready to send next packet */ int tx_ready; /* Count of pending packets to be sent */ int tx_queued; struct list_head tx_queue[NL_NUM_OF_PRIORITIES]; int rx_bytes_queued; struct list_head rx_queue; /* Pool of rx_packet structures that are not currently used. */ struct list_head rx_pool; int rx_pool_size; /* True if reception of data is blocked while userspace processes it. */ int blocking_rx; /* True if there is RX data ready on the hardware. */ int rx_ready; unsigned short last_memtx_serial; /* * Newer versions of the V2 card firmware send serial numbers in the * MemTX register. 'serial_number_detected' is set true when we detect * a non-zero serial number (indicating the new firmware). Thereafter, * the driver can safely ignore the Timer Recovery re-sends to avoid * out-of-sync problems. */ int serial_number_detected; struct work_struct work_rx; /* True if we are to send the set-up data to the hardware. */ int to_setup; /* Card has been removed */ int removed; /* Saved irq value when we disable the interrupt. */ int irq; /* True if this driver is shutting down. */ int shutting_down; /* Modem control lines */ unsigned int control_lines[NL_NUM_OF_ADDRESSES]; struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES]; struct tasklet_struct tasklet; /* The handle for the network layer, for the sending of events to it. 
*/ struct ipw_network *network; struct MEMINFREG __iomem *memory_info_regs; struct MEMCCR __iomem *memregs_CCR; void (*reboot_callback) (void *data); void *reboot_callback_data; unsigned short __iomem *memreg_tx; }; /* * Packet info structure for tx packets. * Note: not all the fields defined here are required for all protocols */ struct ipw_tx_packet { struct list_head queue; /* channel idx + 1 */ unsigned char dest_addr; /* SETUP, CTRL or DATA */ unsigned char protocol; /* Length of data block, which starts at the end of this structure */ unsigned short length; /* Sending state */ /* Offset of where we've sent up to so far */ unsigned long offset; /* Count of packet fragments, starting at 0 */ int fragment_count; /* Called after packet is sent and before is freed */ void (*packet_callback) (void *cb_data, unsigned int packet_length); void *callback_data; }; /* Signals from DTE */ #define COMCTRL_RTS 0 #define COMCTRL_DTR 1 /* Signals from DCE */ #define COMCTRL_CTS 2 #define COMCTRL_DCD 3 #define COMCTRL_DSR 4 #define COMCTRL_RI 5 struct ipw_control_packet_body { /* DTE signal or DCE signal */ unsigned char sig_no; /* 0: set signal, 1: clear signal */ unsigned char value; } __attribute__ ((__packed__)); struct ipw_control_packet { struct ipw_tx_packet header; struct ipw_control_packet_body body; }; struct ipw_rx_packet { struct list_head queue; unsigned int capacity; unsigned int length; unsigned int protocol; unsigned int channel_idx; }; static char *data_type(const unsigned char *buf, unsigned length) { struct nl_packet_header *hdr = (struct nl_packet_header *) buf; if (length == 0) return " "; if (hdr->packet_rank & NL_FIRST_PACKET) { switch (hdr->protocol) { case TL_PROTOCOLID_COM_DATA: return "DATA "; case TL_PROTOCOLID_COM_CTRL: return "CTRL "; case TL_PROTOCOLID_SETUP: return "SETUP"; default: return "???? 
"; } } else return " "; } #define DUMP_MAX_BYTES 64 static void dump_data_bytes(const char *type, const unsigned char *data, unsigned length) { char prefix[56]; sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ", type, data_type(data, length)); print_hex_dump_bytes(prefix, 0, (void *)data, length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES); } static void swap_packet_bitfield_to_le(unsigned char *data) { #ifdef __BIG_ENDIAN_BITFIELD unsigned char tmp = *data, ret = 0; /* * transform bits from aa.bbb.ccc to ccc.bbb.aa */ ret |= tmp & 0xc0 >> 6; ret |= tmp & 0x38 >> 1; ret |= tmp & 0x07 << 5; *data = ret & 0xff; #endif } static void swap_packet_bitfield_from_le(unsigned char *data) { #ifdef __BIG_ENDIAN_BITFIELD unsigned char tmp = *data, ret = 0; /* * transform bits from ccc.bbb.aa to aa.bbb.ccc */ ret |= tmp & 0xe0 >> 5; ret |= tmp & 0x1c << 1; ret |= tmp & 0x03 << 6; *data = ret & 0xff; #endif } static void do_send_fragment(struct ipw_hardware *hw, unsigned char *data, unsigned length) { unsigned i; unsigned long flags; start_timing(); BUG_ON(length > hw->ll_mtu); if (ipwireless_debug) dump_data_bytes("send", data, length); spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 0; swap_packet_bitfield_to_le(data); if (hw->hw_version == HW_VERSION_1) { outw((unsigned short) length, hw->base_port + IODWR); for (i = 0; i < length; i += 2) { unsigned short d = data[i]; __le16 raw_data; if (i + 1 < length) d |= data[i + 1] << 8; raw_data = cpu_to_le16(d); outw(raw_data, hw->base_port + IODWR); } outw(DCR_TXDONE, hw->base_port + IODCR); } else if (hw->hw_version == HW_VERSION_2) { outw((unsigned short) length, hw->base_port); for (i = 0; i < length; i += 2) { unsigned short d = data[i]; __le16 raw_data; if (i + 1 < length) d |= data[i + 1] << 8; raw_data = cpu_to_le16(d); outw(raw_data, hw->base_port); } while ((i & 3) != 2) { outw((unsigned short) 0xDEAD, hw->base_port); i += 2; } writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx); } spin_unlock_irqrestore(&hw->lock, 
flags); end_write_timing(length); } static void do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet) { unsigned short fragment_data_len; unsigned short data_left = packet->length - packet->offset; unsigned short header_size; union nl_packet pkt; header_size = (packet->fragment_count == 0) ? NL_FIRST_PACKET_HEADER_SIZE : NL_FOLLOWING_PACKET_HEADER_SIZE; fragment_data_len = hw->ll_mtu - header_size; if (data_left < fragment_data_len) fragment_data_len = data_left; /* * hdr_first is now in machine bitfield order, which will be swapped * to le just before it goes to hw */ pkt.hdr_first.protocol = packet->protocol; pkt.hdr_first.address = packet->dest_addr; pkt.hdr_first.packet_rank = 0; /* First packet? */ if (packet->fragment_count == 0) { pkt.hdr_first.packet_rank |= NL_FIRST_PACKET; pkt.hdr_first.length_lsb = (unsigned char) packet->length; pkt.hdr_first.length_msb = (unsigned char) (packet->length >> 8); } memcpy(pkt.rawpkt + header_size, ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) + packet->offset, fragment_data_len); packet->offset += fragment_data_len; packet->fragment_count++; /* Last packet? (May also be first packet.) */ if (packet->offset == packet->length) pkt.hdr_first.packet_rank |= NL_LAST_PACKET; do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len); /* If this packet has unsent data, then re-queue it. 
*/ if (packet->offset < packet->length) { /* * Re-queue it at the head of the highest priority queue so * it goes before all other packets */ unsigned long flags; spin_lock_irqsave(&hw->lock, flags); list_add(&packet->queue, &hw->tx_queue[0]); hw->tx_queued++; spin_unlock_irqrestore(&hw->lock, flags); } else { if (packet->packet_callback) packet->packet_callback(packet->callback_data, packet->length); kfree(packet); } } static void ipw_setup_hardware(struct ipw_hardware *hw) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->hw_version == HW_VERSION_1) { /* Reset RX FIFO */ outw(DCR_RXRESET, hw->base_port + IODCR); /* SB: Reset TX FIFO */ outw(DCR_TXRESET, hw->base_port + IODCR); /* Enable TX and RX interrupts. */ outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER); } else { /* * Set INTRACK bit (bit 0), which means we must explicitly * acknowledge interrupts by clearing bit 2 of reg_config_and_status. */ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); csr |= 1; writew(csr, &hw->memregs_CCR->reg_config_and_status); } spin_unlock_irqrestore(&hw->lock, flags); } /* * If 'packet' is NULL, then this function allocates a new packet, setting its * length to 0 and ensuring it has the specified minimum amount of free space. * * If 'packet' is not NULL, then this function enlarges it if it doesn't * have the specified minimum amount of free space. * */ static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw, struct ipw_rx_packet *packet, int minimum_free_space) { if (!packet) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (!list_empty(&hw->rx_pool)) { packet = list_first_entry(&hw->rx_pool, struct ipw_rx_packet, queue); hw->rx_pool_size--; spin_unlock_irqrestore(&hw->lock, flags); list_del(&packet->queue); } else { const int min_capacity = ipwireless_ppp_mru(hw->network) + 2; int new_capacity; spin_unlock_irqrestore(&hw->lock, flags); new_capacity = (minimum_free_space > min_capacity ? 
minimum_free_space : min_capacity); packet = kmalloc(sizeof(struct ipw_rx_packet) + new_capacity, GFP_ATOMIC); if (!packet) return NULL; packet->capacity = new_capacity; } packet->length = 0; } if (packet->length + minimum_free_space > packet->capacity) { struct ipw_rx_packet *old_packet = packet; packet = kmalloc(sizeof(struct ipw_rx_packet) + old_packet->length + minimum_free_space, GFP_ATOMIC); if (!packet) { kfree(old_packet); return NULL; } memcpy(packet, old_packet, sizeof(struct ipw_rx_packet) + old_packet->length); packet->capacity = old_packet->length + minimum_free_space; kfree(old_packet); } return packet; } static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet) { if (hw->rx_pool_size > 6) kfree(packet); else { hw->rx_pool_size++; list_add(&packet->queue, &hw->rx_pool); } } static void queue_received_packet(struct ipw_hardware *hw, unsigned int protocol, unsigned int address, const unsigned char *data, int length, int is_last) { unsigned int channel_idx = address - 1; struct ipw_rx_packet *packet = NULL; unsigned long flags; /* Discard packet if channel index is out of range. */ if (channel_idx >= NL_NUM_OF_ADDRESSES) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": data packet has bad address %u\n", address); return; } /* * ->packet_assembler is safe to touch unlocked, this is the only place */ if (protocol == TL_PROTOCOLID_COM_DATA) { struct ipw_rx_packet **assem = &hw->packet_assembler[channel_idx]; /* * Create a new packet, or assembler already contains one * enlarge it by 'length' bytes. */ (*assem) = pool_allocate(hw, *assem, length); if (!(*assem)) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": no memory for incomming data packet, dropped!\n"); return; } (*assem)->protocol = protocol; (*assem)->channel_idx = channel_idx; /* Append this packet data onto existing data. 
*/ memcpy((unsigned char *)(*assem) + sizeof(struct ipw_rx_packet) + (*assem)->length, data, length); (*assem)->length += length; if (is_last) { packet = *assem; *assem = NULL; /* Count queued DATA bytes only */ spin_lock_irqsave(&hw->lock, flags); hw->rx_bytes_queued += packet->length; spin_unlock_irqrestore(&hw->lock, flags); } } else { /* If it's a CTRL packet, don't assemble, just queue it. */ packet = pool_allocate(hw, NULL, length); if (!packet) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": no memory for incomming ctrl packet, dropped!\n"); return; } packet->protocol = protocol; packet->channel_idx = channel_idx; memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet), data, length); packet->length = length; } /* * If this is the last packet, then send the assembled packet on to the * network layer. */ if (packet) { spin_lock_irqsave(&hw->lock, flags); list_add_tail(&packet->queue, &hw->rx_queue); /* Block reception of incoming packets if queue is full. */ hw->blocking_rx = (hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE); spin_unlock_irqrestore(&hw->lock, flags); schedule_work(&hw->work_rx); } } /* * Workqueue callback */ static void ipw_receive_data_work(struct work_struct *work_rx) { struct ipw_hardware *hw = container_of(work_rx, struct ipw_hardware, work_rx); unsigned long flags; spin_lock_irqsave(&hw->lock, flags); while (!list_empty(&hw->rx_queue)) { struct ipw_rx_packet *packet = list_first_entry(&hw->rx_queue, struct ipw_rx_packet, queue); if (hw->shutting_down) break; list_del(&packet->queue); /* * Note: ipwireless_network_packet_received must be called in a * process context (i.e. via schedule_work) because the tty * output code can sleep in the tty_flip_buffer_push call. */ if (packet->protocol == TL_PROTOCOLID_COM_DATA) { if (hw->network != NULL) { /* If the network hasn't been disconnected. 
*/ spin_unlock_irqrestore(&hw->lock, flags); /* * This must run unlocked due to tty processing * and mutex locking */ ipwireless_network_packet_received( hw->network, packet->channel_idx, (unsigned char *)packet + sizeof(struct ipw_rx_packet), packet->length); spin_lock_irqsave(&hw->lock, flags); } /* Count queued DATA bytes only */ hw->rx_bytes_queued -= packet->length; } else { /* * This is safe to be called locked, callchain does * not block */ handle_received_CTRL_packet(hw, packet->channel_idx, (unsigned char *)packet + sizeof(struct ipw_rx_packet), packet->length); } pool_free(hw, packet); /* * Unblock reception of incoming packets if queue is no longer * full. */ hw->blocking_rx = hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE; if (hw->shutting_down) break; } spin_unlock_irqrestore(&hw->lock, flags); } static void handle_received_CTRL_packet(struct ipw_hardware *hw, unsigned int channel_idx, const unsigned char *data, int len) { const struct ipw_control_packet_body *body = (const struct ipw_control_packet_body *) data; unsigned int changed_mask; if (len != sizeof(struct ipw_control_packet_body)) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": control packet was %d bytes - wrong size!\n", len); return; } switch (body->sig_no) { case COMCTRL_CTS: changed_mask = IPW_CONTROL_LINE_CTS; break; case COMCTRL_DCD: changed_mask = IPW_CONTROL_LINE_DCD; break; case COMCTRL_DSR: changed_mask = IPW_CONTROL_LINE_DSR; break; case COMCTRL_RI: changed_mask = IPW_CONTROL_LINE_RI; break; default: changed_mask = 0; } if (changed_mask != 0) { if (body->value) hw->control_lines[channel_idx] |= changed_mask; else hw->control_lines[channel_idx] &= ~changed_mask; if (hw->network) ipwireless_network_notify_control_line_change( hw->network, channel_idx, hw->control_lines[channel_idx], changed_mask); } } static void handle_received_packet(struct ipw_hardware *hw, const union nl_packet *packet, unsigned short len) { unsigned int protocol = packet->hdr.protocol; unsigned int address = 
packet->hdr.address; unsigned int header_length; const unsigned char *data; unsigned int data_len; int is_last = packet->hdr.packet_rank & NL_LAST_PACKET; if (packet->hdr.packet_rank & NL_FIRST_PACKET) header_length = NL_FIRST_PACKET_HEADER_SIZE; else header_length = NL_FOLLOWING_PACKET_HEADER_SIZE; data = packet->rawpkt + header_length; data_len = len - header_length; switch (protocol) { case TL_PROTOCOLID_COM_DATA: case TL_PROTOCOLID_COM_CTRL: queue_received_packet(hw, protocol, address, data, data_len, is_last); break; case TL_PROTOCOLID_SETUP: handle_received_SETUP_packet(hw, address, data, data_len, is_last); break; } } static void acknowledge_data_read(struct ipw_hardware *hw) { if (hw->hw_version == HW_VERSION_1) outw(DCR_RXDONE, hw->base_port + IODCR); else writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); } /* * Retrieve a packet from the IPW hardware. */ static void do_receive_packet(struct ipw_hardware *hw) { unsigned len; unsigned i; unsigned char pkt[LL_MTU_MAX]; start_timing(); if (hw->hw_version == HW_VERSION_1) { len = inw(hw->base_port + IODRR); if (len > hw->ll_mtu) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": received a packet of %u bytes - longer than the MTU!\n", len); outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR); return; } for (i = 0; i < len; i += 2) { __le16 raw_data = inw(hw->base_port + IODRR); unsigned short data = le16_to_cpu(raw_data); pkt[i] = (unsigned char) data; pkt[i + 1] = (unsigned char) (data >> 8); } } else { len = inw(hw->base_port); if (len > hw->ll_mtu) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": received a packet of %u bytes - longer than the MTU!\n", len); writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); return; } for (i = 0; i < len; i += 2) { __le16 raw_data = inw(hw->base_port); unsigned short data = le16_to_cpu(raw_data); pkt[i] = (unsigned char) data; pkt[i + 1] = (unsigned char) (data >> 8); } while ((i & 3) != 2) { inw(hw->base_port); i += 2; } } 
acknowledge_data_read(hw); swap_packet_bitfield_from_le(pkt); if (ipwireless_debug) dump_data_bytes("recv", pkt, len); handle_received_packet(hw, (union nl_packet *) pkt, len); end_read_timing(len); } static int get_current_packet_priority(struct ipw_hardware *hw) { /* * If we're initializing, don't send anything of higher priority than * PRIO_SETUP. The network layer therefore need not care about * hardware initialization - any of its stuff will simply be queued * until setup is complete. */ return (hw->to_setup || hw->initializing ? PRIO_SETUP + 1 : NL_NUM_OF_PRIORITIES); } /* * return 1 if something has been received from hw */ static int get_packets_from_hw(struct ipw_hardware *hw) { int received = 0; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); while (hw->rx_ready && !hw->blocking_rx) { received = 1; hw->rx_ready--; spin_unlock_irqrestore(&hw->lock, flags); do_receive_packet(hw); spin_lock_irqsave(&hw->lock, flags); } spin_unlock_irqrestore(&hw->lock, flags); return received; } /* * Send pending packet up to given priority, prioritize SETUP data until * hardware is fully setup. 
* * return 1 if more packets can be sent */ static int send_pending_packet(struct ipw_hardware *hw, int priority_limit) { int more_to_send = 0; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->tx_queued && hw->tx_ready) { int priority; struct ipw_tx_packet *packet = NULL; /* Pick a packet */ for (priority = 0; priority < priority_limit; priority++) { if (!list_empty(&hw->tx_queue[priority])) { packet = list_first_entry( &hw->tx_queue[priority], struct ipw_tx_packet, queue); hw->tx_queued--; list_del(&packet->queue); break; } } if (!packet) { hw->tx_queued = 0; spin_unlock_irqrestore(&hw->lock, flags); return 0; } spin_unlock_irqrestore(&hw->lock, flags); /* Send */ do_send_packet(hw, packet); /* Check if more to send */ spin_lock_irqsave(&hw->lock, flags); for (priority = 0; priority < priority_limit; priority++) if (!list_empty(&hw->tx_queue[priority])) { more_to_send = 1; break; } if (!more_to_send) hw->tx_queued = 0; } spin_unlock_irqrestore(&hw->lock, flags); return more_to_send; } /* * Send and receive all queued packets. */ static void ipwireless_do_tasklet(unsigned long hw_) { struct ipw_hardware *hw = (struct ipw_hardware *) hw_; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); if (hw->shutting_down) { spin_unlock_irqrestore(&hw->lock, flags); return; } if (hw->to_setup == 1) { /* * Initial setup data sent to hardware */ hw->to_setup = 2; spin_unlock_irqrestore(&hw->lock, flags); ipw_setup_hardware(hw); ipw_send_setup_packet(hw); send_pending_packet(hw, PRIO_SETUP + 1); get_packets_from_hw(hw); } else { int priority_limit = get_current_packet_priority(hw); int again; spin_unlock_irqrestore(&hw->lock, flags); do { again = send_pending_packet(hw, priority_limit); again |= get_packets_from_hw(hw); } while (again); } } /* * return true if the card is physically present. 
*/ static int is_card_present(struct ipw_hardware *hw) { if (hw->hw_version == HW_VERSION_1) return inw(hw->base_port + IOIR) != 0xFFFF; else return readl(&hw->memory_info_regs->memreg_card_present) == CARD_PRESENT_VALUE; } static irqreturn_t ipwireless_handle_v1_interrupt(int irq, struct ipw_hardware *hw) { unsigned short irqn; irqn = inw(hw->base_port + IOIR); /* Check if card is present */ if (irqn == 0xFFFF) return IRQ_NONE; else if (irqn != 0) { unsigned short ack = 0; unsigned long flags; /* Transmit complete. */ if (irqn & IR_TXINTR) { ack |= IR_TXINTR; spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); } /* Received data */ if (irqn & IR_RXINTR) { ack |= IR_RXINTR; spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); } if (ack != 0) { outw(ack, hw->base_port + IOIR); tasklet_schedule(&hw->tasklet); } return IRQ_HANDLED; } return IRQ_NONE; } static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw) { unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status); csr &= 0xfffd; writew(csr, &hw->memregs_CCR->reg_config_and_status); } static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq, struct ipw_hardware *hw) { int tx = 0; int rx = 0; int rx_repeat = 0; int try_mem_tx_old; unsigned long flags; do { unsigned short memtx = readw(hw->memreg_tx); unsigned short memtx_serial; unsigned short memrxdone = readw(&hw->memory_info_regs->memreg_rx_done); try_mem_tx_old = 0; /* check whether the interrupt was generated by ipwireless card */ if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) { /* check if the card uses memreg_tx_old register */ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { memtx = readw(&hw->memory_info_regs->memreg_tx_old); if (memtx & MEMTX_TX) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Using memreg_tx_old\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; } else { return IRQ_NONE; } } else return IRQ_NONE; } /* * 
See if the card is physically present. Note that while it is * powering up, it appears not to be present. */ if (!is_card_present(hw)) { acknowledge_pcmcia_interrupt(hw); return IRQ_HANDLED; } memtx_serial = memtx & (unsigned short) 0xff00; if (memtx & MEMTX_TX) { writew(memtx_serial, hw->memreg_tx); if (hw->serial_number_detected) { if (memtx_serial != hw->last_memtx_serial) { hw->last_memtx_serial = memtx_serial; spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); rx = 1; } else /* Ignore 'Timer Recovery' duplicates. */ rx_repeat = 1; } else { /* * If a non-zero serial number is seen, then enable * serial number checking. */ if (memtx_serial != 0) { hw->serial_number_detected = 1; printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": memreg_tx serial num detected\n"); spin_lock_irqsave(&hw->lock, flags); hw->rx_ready++; spin_unlock_irqrestore(&hw->lock, flags); } rx = 1; } } if (memrxdone & MEMRX_RX_DONE) { writew(0, &hw->memory_info_regs->memreg_rx_done); spin_lock_irqsave(&hw->lock, flags); hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); tx = 1; } if (tx) writew(MEMRX_PCINTACKK, &hw->memory_info_regs->memreg_pc_interrupt_ack); acknowledge_pcmcia_interrupt(hw); if (tx || rx) tasklet_schedule(&hw->tasklet); else if (!rx_repeat) { if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { if (hw->serial_number_detected) printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": spurious interrupt - new_tx mode\n"); else { printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": no valid memreg_tx value - switching to the old memreg_tx\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; try_mem_tx_old = 1; } } else printk(KERN_WARNING IPWIRELESS_PCCARD_NAME ": spurious interrupt - old_tx mode\n"); } } while (try_mem_tx_old == 1); return IRQ_HANDLED; } irqreturn_t ipwireless_interrupt(int irq, void *dev_id) { struct ipw_dev *ipw = dev_id; if (ipw->hardware->hw_version == HW_VERSION_1) return ipwireless_handle_v1_interrupt(irq, 
ipw->hardware); else return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware); } static void flush_packets_to_hw(struct ipw_hardware *hw) { int priority_limit; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); priority_limit = get_current_packet_priority(hw); spin_unlock_irqrestore(&hw->lock, flags); while (send_pending_packet(hw, priority_limit)); } static void send_packet(struct ipw_hardware *hw, int priority, struct ipw_tx_packet *packet) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); list_add_tail(&packet->queue, &hw->tx_queue[priority]); hw->tx_queued++; spin_unlock_irqrestore(&hw->lock, flags); flush_packets_to_hw(hw); } /* Create data packet, non-atomic allocation */ static void *alloc_data_packet(int data_size, unsigned char dest_addr, unsigned char protocol) { struct ipw_tx_packet *packet = kzalloc( sizeof(struct ipw_tx_packet) + data_size, GFP_ATOMIC); if (!packet) return NULL; INIT_LIST_HEAD(&packet->queue); packet->dest_addr = dest_addr; packet->protocol = protocol; packet->length = data_size; return packet; } static void *alloc_ctrl_packet(int header_size, unsigned char dest_addr, unsigned char protocol, unsigned char sig_no) { /* * sig_no is located right after ipw_tx_packet struct in every * CTRL or SETUP packets, we can use ipw_control_packet as a * common struct */ struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC); if (!packet) return NULL; INIT_LIST_HEAD(&packet->header.queue); packet->header.dest_addr = dest_addr; packet->header.protocol = protocol; packet->header.length = header_size - sizeof(struct ipw_tx_packet); packet->body.sig_no = sig_no; return packet; } int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx, const unsigned char *data, unsigned int length, void (*callback) (void *cb, unsigned int length), void *callback_data) { struct ipw_tx_packet *packet; packet = alloc_data_packet(length, (channel_idx + 1), TL_PROTOCOLID_COM_DATA); if (!packet) return -ENOMEM; 
packet->packet_callback = callback; packet->callback_data = callback_data; memcpy((unsigned char *) packet + sizeof(struct ipw_tx_packet), data, length); send_packet(hw, PRIO_DATA, packet); return 0; } static int set_control_line(struct ipw_hardware *hw, int prio, unsigned int channel_idx, int line, int state) { struct ipw_control_packet *packet; int protocolid = TL_PROTOCOLID_COM_CTRL; if (prio == PRIO_SETUP) protocolid = TL_PROTOCOLID_SETUP; packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet), (channel_idx + 1), protocolid, line); if (!packet) return -ENOMEM; packet->header.length = sizeof(struct ipw_control_packet_body); packet->body.value = (state == 0 ? 0 : 1); send_packet(hw, prio, &packet->header); return 0; } static int set_DTR(struct ipw_hardware *hw, int priority, unsigned int channel_idx, int state) { if (state != 0) hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR; else hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR; return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state); } static int set_RTS(struct ipw_hardware *hw, int priority, unsigned int channel_idx, int state) { if (state != 0) hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS; else hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS; return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state); } int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx, int state) { return set_DTR(hw, PRIO_CTRL, channel_idx, state); } int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx, int state) { return set_RTS(hw, PRIO_CTRL, channel_idx, state); } struct ipw_setup_get_version_query_packet { struct ipw_tx_packet header; struct tl_setup_get_version_qry body; }; struct ipw_setup_config_packet { struct ipw_tx_packet header; struct tl_setup_config_msg body; }; struct ipw_setup_config_done_packet { struct ipw_tx_packet header; struct tl_setup_config_done_msg body; }; struct ipw_setup_open_packet { struct ipw_tx_packet 
header; struct tl_setup_open_msg body; }; struct ipw_setup_info_packet { struct ipw_tx_packet header; struct tl_setup_info_msg body; }; struct ipw_setup_reboot_msg_ack { struct ipw_tx_packet header; struct TlSetupRebootMsgAck body; }; /* This handles the actual initialization of the card */ static void __handle_setup_get_version_rsp(struct ipw_hardware *hw) { struct ipw_setup_config_packet *config_packet; struct ipw_setup_config_done_packet *config_done_packet; struct ipw_setup_open_packet *open_packet; struct ipw_setup_info_packet *info_packet; int port; unsigned int channel_idx; /* generate config packet */ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { config_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_config_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_CONFIG_MSG); if (!config_packet) goto exit_nomem; config_packet->header.length = sizeof(struct tl_setup_config_msg); config_packet->body.port_no = port; config_packet->body.prio_data = PRIO_DATA; config_packet->body.prio_ctrl = PRIO_CTRL; send_packet(hw, PRIO_SETUP, &config_packet->header); } config_done_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_config_done_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_CONFIG_DONE_MSG); if (!config_done_packet) goto exit_nomem; config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg); send_packet(hw, PRIO_SETUP, &config_done_packet->header); /* generate open packet */ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) { open_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_open_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_OPEN_MSG); if (!open_packet) goto exit_nomem; open_packet->header.length = sizeof(struct tl_setup_open_msg); open_packet->body.port_no = port; send_packet(hw, PRIO_SETUP, &open_packet->header); } for (channel_idx = 0; channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) { int ret; ret = set_DTR(hw, PRIO_SETUP, channel_idx, (hw->control_lines[channel_idx] & 
IPW_CONTROL_LINE_DTR) != 0); if (ret) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": error setting DTR (%d)\n", ret); return; } set_RTS(hw, PRIO_SETUP, channel_idx, (hw->control_lines [channel_idx] & IPW_CONTROL_LINE_RTS) != 0); if (ret) { printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": error setting RTS (%d)\n", ret); return; } } /* * For NDIS we assume that we are using sync PPP frames, for COM async. * This driver uses NDIS mode too. We don't bother with translation * from async -> sync PPP. */ info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_INFO_MSG); if (!info_packet) goto exit_nomem; info_packet->header.length = sizeof(struct tl_setup_info_msg); info_packet->body.driver_type = NDISWAN_DRIVER; info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION; info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION; send_packet(hw, PRIO_SETUP, &info_packet->header); /* Initialization is now complete, so we clear the 'to_setup' flag */ hw->to_setup = 0; return; exit_nomem: printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": not enough memory to alloc control packet\n"); hw->to_setup = -1; } static void handle_setup_get_version_rsp(struct ipw_hardware *hw, unsigned char vers_no) { del_timer(&hw->setup_timer); hw->initializing = 0; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n"); if (vers_no == TL_SETUP_VERSION) __handle_setup_get_version_rsp(hw); else printk(KERN_ERR IPWIRELESS_PCCARD_NAME ": invalid hardware version no %u\n", (unsigned int) vers_no); } static void ipw_send_setup_packet(struct ipw_hardware *hw) { struct ipw_setup_get_version_query_packet *ver_packet; ver_packet = alloc_ctrl_packet( sizeof(struct ipw_setup_get_version_query_packet), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_GET_VERSION_QRY); ver_packet->header.length = sizeof(struct tl_setup_get_version_qry); /* * Response is handled in handle_received_SETUP_packet */ send_packet(hw, PRIO_SETUP, 
&ver_packet->header); } static void handle_received_SETUP_packet(struct ipw_hardware *hw, unsigned int address, const unsigned char *data, int len, int is_last) { const union ipw_setup_rx_msg *rx_msg = (const union ipw_setup_rx_msg *) data; if (address != ADDR_SETUP_PROT) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": setup packet has bad address %d\n", address); return; } switch (rx_msg->sig_no) { case TL_SETUP_SIGNO_GET_VERSION_RSP: if (hw->to_setup) handle_setup_get_version_rsp(hw, rx_msg->version_rsp_msg.version); break; case TL_SETUP_SIGNO_OPEN_MSG: if (ipwireless_debug) { unsigned int channel_idx = rx_msg->open_msg.port_no - 1; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": OPEN_MSG [channel %u] reply received\n", channel_idx); } break; case TL_SETUP_SIGNO_INFO_MSG_ACK: if (ipwireless_debug) printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": card successfully configured as NDISWAN\n"); break; case TL_SETUP_SIGNO_REBOOT_MSG: if (hw->to_setup) printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": Setup not completed - ignoring reboot msg\n"); else { struct ipw_setup_reboot_msg_ack *packet; printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": Acknowledging REBOOT message\n"); packet = alloc_ctrl_packet( sizeof(struct ipw_setup_reboot_msg_ack), ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP, TL_SETUP_SIGNO_REBOOT_MSG_ACK); packet->header.length = sizeof(struct TlSetupRebootMsgAck); send_packet(hw, PRIO_SETUP, &packet->header); if (hw->reboot_callback) hw->reboot_callback(hw->reboot_callback_data); } break; default: printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": unknown setup message %u received\n", (unsigned int) rx_msg->sig_no); } } static void do_close_hardware(struct ipw_hardware *hw) { unsigned int irqn; if (hw->hw_version == HW_VERSION_1) { /* Disable TX and RX interrupts. 
*/ outw(0, hw->base_port + IOIER); /* Acknowledge any outstanding interrupt requests */ irqn = inw(hw->base_port + IOIR); if (irqn & IR_TXINTR) outw(IR_TXINTR, hw->base_port + IOIR); if (irqn & IR_RXINTR) outw(IR_RXINTR, hw->base_port + IOIR); synchronize_irq(hw->irq); } } struct ipw_hardware *ipwireless_hardware_create(void) { int i; struct ipw_hardware *hw = kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL); if (!hw) return NULL; hw->irq = -1; hw->initializing = 1; hw->tx_ready = 1; hw->rx_bytes_queued = 0; hw->rx_pool_size = 0; hw->last_memtx_serial = (unsigned short) 0xffff; for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) INIT_LIST_HEAD(&hw->tx_queue[i]); INIT_LIST_HEAD(&hw->rx_queue); INIT_LIST_HEAD(&hw->rx_pool); spin_lock_init(&hw->lock); tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw); INIT_WORK(&hw->work_rx, ipw_receive_data_work); setup_timer(&hw->setup_timer, ipwireless_setup_timer, (unsigned long) hw); return hw; } void ipwireless_init_hardware_v1(struct ipw_hardware *hw, unsigned int base_port, void __iomem *attr_memory, void __iomem *common_memory, int is_v2_card, void (*reboot_callback) (void *data), void *reboot_callback_data) { if (hw->removed) { hw->removed = 0; enable_irq(hw->irq); } hw->base_port = base_port; hw->hw_version = (is_v2_card ? HW_VERSION_2 : HW_VERSION_1); hw->ll_mtu = (hw->hw_version == HW_VERSION_1 ? 
LL_MTU_V1 : LL_MTU_V2); hw->memregs_CCR = (struct MEMCCR __iomem *) ((unsigned short __iomem *) attr_memory + 0x200); hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory; hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new; hw->reboot_callback = reboot_callback; hw->reboot_callback_data = reboot_callback_data; } void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw) { hw->initializing = 1; hw->init_loops = 0; printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": waiting for card to start up...\n"); ipwireless_setup_timer((unsigned long) hw); } static void ipwireless_setup_timer(unsigned long data) { struct ipw_hardware *hw = (struct ipw_hardware *) data; hw->init_loops++; if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY && hw->hw_version == HW_VERSION_2 && hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": failed to startup using TX2, trying TX\n"); hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old; hw->init_loops = 0; } /* Give up after a certain number of retries */ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) { printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card failed to start up!\n"); hw->initializing = 0; } else { /* Do not attempt to write to the board if it is not present. */ if (is_card_present(hw)) { unsigned long flags; spin_lock_irqsave(&hw->lock, flags); hw->to_setup = 1; hw->tx_ready = 1; spin_unlock_irqrestore(&hw->lock, flags); tasklet_schedule(&hw->tasklet); } mod_timer(&hw->setup_timer, jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO)); } } /* * Stop any interrupts from executing so that, once this function returns, * other layers of the driver can be sure they won't get any more callbacks. * Thus must be called on a proper process context. */ void ipwireless_stop_interrupts(struct ipw_hardware *hw) { if (!hw->shutting_down) { /* Tell everyone we are going down. 
*/ hw->shutting_down = 1; del_timer(&hw->setup_timer); /* Prevent the hardware from sending any more interrupts */ do_close_hardware(hw); } } void ipwireless_hardware_free(struct ipw_hardware *hw) { int i; struct ipw_rx_packet *rp, *rq; struct ipw_tx_packet *tp, *tq; ipwireless_stop_interrupts(hw); flush_work_sync(&hw->work_rx); for (i = 0; i < NL_NUM_OF_ADDRESSES; i++) if (hw->packet_assembler[i] != NULL) kfree(hw->packet_assembler[i]); for (i = 0; i < NL_NUM_OF_PRIORITIES; i++) list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) { list_del(&tp->queue); kfree(tp); } list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) { list_del(&rp->queue); kfree(rp); } list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) { list_del(&rp->queue); kfree(rp); } kfree(hw); } /* * Associate the specified network with this hardware, so it will receive events * from it. */ void ipwireless_associate_network(struct ipw_hardware *hw, struct ipw_network *network) { hw->network = network; }
gpl-2.0
codename13/android_kernel_samsung_kylessopen
drivers/block/paride/bpck.c
14851
9505
/* bpck.c (c) 1996-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. bpck.c is a low-level protocol driver for the MicroSolutions "backpack" parallel port IDE adapter. */ /* Changes: 1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay 1.02 GRG 1998.08.15 default pi->delay returned to 4 */ #define BPCK_VERSION "1.02" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #undef r2 #undef w2 #define PC pi->private #define r2() (PC=(in_p(2) & 0xff)) #define w2(byte) {out_p(2,byte); PC = byte;} #define t2(pat) {PC ^= pat; out_p(2,PC);} #define e2() {PC &= 0xfe; out_p(2,PC);} #define o2() {PC |= 1; out_p(2,PC);} #define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set cont = 2 - use internal bpck register addressing */ static int cont_map[3] = { 0x40, 0x48, 0 }; static int bpck_read_regr( PIA *pi, int cont, int regr ) { int r, l, h; r = regr + cont_map[cont]; switch (pi->mode) { case 0: w0(r & 0xf); w0(r); t2(2); t2(4); l = r1(); t2(4); h = r1(); return j44(l,h); case 1: w0(r & 0xf); w0(r); t2(2); e2(); t2(0x20); t2(4); h = r0(); t2(1); t2(0x20); return h; case 2: case 3: case 4: w0(r); w2(9); w2(0); w2(0x20); h = r4(); w2(0); return h; } return -1; } static void bpck_write_regr( PIA *pi, int cont, int regr, int val ) { int r; r = regr + cont_map[cont]; switch (pi->mode) { case 0: case 1: w0(r); t2(2); w0(val); o2(); t2(4); t2(1); break; case 2: case 3: case 4: w0(r); w2(9); w2(0); w0(val); w2(1); w2(3); w2(0); break; } } /* These macros access the bpck registers in native addressing */ #define WR(r,v) bpck_write_regr(pi,2,r,v) #define RR(r) (bpck_read_regr(pi,2,r)) static void bpck_write_block( PIA *pi, char * buf, int count ) { int i; switch (pi->mode) { case 0: WR(4,0x40); w0(0x40); t2(2); t2(1); for 
(i=0;i<count;i++) { w0(buf[i]); t2(4); } WR(4,0); break; case 1: WR(4,0x50); w0(0x40); t2(2); t2(1); for (i=0;i<count;i++) { w0(buf[i]); t2(4); } WR(4,0x10); break; case 2: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count;i++) w4(buf[i]); w2(0); WR(4,8); break; case 3: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]); w2(0); WR(4,8); break; case 4: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]); w2(0); WR(4,8); break; } } static void bpck_read_block( PIA *pi, char * buf, int count ) { int i, l, h; switch (pi->mode) { case 0: WR(4,0x40); w0(0x40); t2(2); for (i=0;i<count;i++) { t2(4); l = r1(); t2(4); h = r1(); buf[i] = j44(l,h); } WR(4,0); break; case 1: WR(4,0x50); w0(0x40); t2(2); t2(0x20); for(i=0;i<count;i++) { t2(4); buf[i] = r0(); } t2(1); t2(0x20); WR(4,0x10); break; case 2: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count;i++) buf[i] = r4(); w2(0); WR(4,8); break; case 3: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w(); w2(0); WR(4,8); break; case 4: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l(); w2(0); WR(4,8); break; } } static int bpck_probe_unit ( PIA *pi ) { int o1, o0, f7, id; int t, s; id = pi->unit; s = 0; w2(4); w2(0xe); r2(); t2(2); o1 = r1()&0xf8; o0 = r0(); w0(255-id); w2(4); w0(id); t2(8); t2(8); t2(8); t2(2); t = r1()&0xf8; f7 = ((id % 8) == 7); if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; } if ((t == o1) && ((!f7) || (s == o1))) { w2(0x4c); w0(o0); return 0; } t2(8); w0(0); t2(2); w2(0x4c); w0(o0); return 1; } static void bpck_connect ( PIA *pi ) { pi->saved_r0 = r0(); w0(0xff-pi->unit); w2(4); w0(pi->unit); t2(8); t2(8); t2(8); t2(2); t2(2); switch (pi->mode) { case 0: t2(8); WR(4,0); break; case 1: t2(8); WR(4,0x10); break; case 2: case 3: case 4: w2(0); WR(4,8); break; } WR(5,8); if (pi->devtype == PI_PCD) { WR(0x46,0x10); /* fiddle with ESS 
logic ??? */ WR(0x4c,0x38); WR(0x4d,0x88); WR(0x46,0xa0); WR(0x41,0); WR(0x4e,8); } } static void bpck_disconnect ( PIA *pi ) { w0(0); if (pi->mode >= 2) { w2(9); w2(0); } else t2(2); w2(0x4c); w0(pi->saved_r0); } static void bpck_force_spp ( PIA *pi ) /* This fakes the EPP protocol to turn off EPP ... */ { pi->saved_r0 = r0(); w0(0xff-pi->unit); w2(4); w0(pi->unit); t2(8); t2(8); t2(8); t2(2); t2(2); w2(0); w0(4); w2(9); w2(0); w0(0); w2(1); w2(3); w2(0); w0(0); w2(9); w2(0); w2(0x4c); w0(pi->saved_r0); } #define TEST_LEN 16 static int bpck_test_proto( PIA *pi, char * scratch, int verbose ) { int i, e, l, h, om; char buf[TEST_LEN]; bpck_force_spp(pi); switch (pi->mode) { case 0: bpck_connect(pi); WR(0x13,0x7f); w0(0x13); t2(2); for(i=0;i<TEST_LEN;i++) { t2(4); l = r1(); t2(4); h = r1(); buf[i] = j44(l,h); } bpck_disconnect(pi); break; case 1: bpck_connect(pi); WR(0x13,0x7f); w0(0x13); t2(2); t2(0x20); for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); } t2(1); t2(0x20); bpck_disconnect(pi); break; case 2: case 3: case 4: om = pi->mode; pi->mode = 0; bpck_connect(pi); WR(7,3); WR(4,8); bpck_disconnect(pi); pi->mode = om; bpck_connect(pi); w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0); switch (pi->mode) { case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4(); break; case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w(); break; case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l(); break; } w2(0); WR(7,0); bpck_disconnect(pi); break; } if (verbose) { printk("%s: bpck: 0x%x unit %d mode %d: ", pi->device,pi->port,pi->unit,pi->mode); for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]); printk("\n"); } e = 0; for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++; return e; } static void bpck_read_eeprom ( PIA *pi, char * buf ) { int i,j,k,n,p,v,f, om, od; bpck_force_spp(pi); om = pi->mode; od = pi->delay; pi->mode = 0; pi->delay = 6; bpck_connect(pi); n = 0; WR(4,0); for (i=0;i<64;i++) { WR(6,8); WR(6,0xc); p = 0x100; for (k=0;k<9;k++) { f = (((i + 0x180) & p) != 0) * 2; 
WR(6,f+0xc); WR(6,f+0xd); WR(6,f+0xc); p = (p >> 1); } for (j=0;j<2;j++) { v = 0; for (k=0;k<8;k++) { WR(6,0xc); WR(6,0xd); WR(6,0xc); f = RR(0); v = 2*v + (f == 0x84); } buf[2*i+1-j] = v; } } WR(6,8); WR(6,0); WR(5,8); bpck_disconnect(pi); if (om >= 2) { bpck_connect(pi); WR(7,3); WR(4,8); bpck_disconnect(pi); } pi->mode = om; pi->delay = od; } static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */ { int i, r, m; w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i); m = -1; if (r == i) m = 2; if (r == (255-i)) m = 0; w2(0xc); i = r0(); w0(255-i); r = r0(); w0(i); if (r != (255-i)) m = -1; if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); } if (m == 2) { w2(0x26); w2(0xc); } if (m == -1) return 0; return 5; } static void bpck_log_adapter( PIA *pi, char * scratch, int verbose ) { char *mode_string[5] = { "4-bit","8-bit","EPP-8", "EPP-16","EPP-32" }; #ifdef DUMP_EEPROM int i; #endif bpck_read_eeprom(pi,scratch); #ifdef DUMP_EEPROM if (verbose) { for(i=0;i<128;i++) if ((scratch[i] < ' ') || (scratch[i] > '~')) scratch[i] = '.'; printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch); printk("%s: %64.64s\n",pi->device,&scratch[64]); } #endif printk("%s: bpck %s, backpack %8.8s unit %d", pi->device,BPCK_VERSION,&scratch[110],pi->unit); printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port, pi->mode,mode_string[pi->mode],pi->delay); } static struct pi_protocol bpck = { .owner = THIS_MODULE, .name = "bpck", .max_mode = 5, .epp_first = 2, .default_delay = 4, .max_units = 255, .write_regr = bpck_write_regr, .read_regr = bpck_read_regr, .write_block = bpck_write_block, .read_block = bpck_read_block, .connect = bpck_connect, .disconnect = bpck_disconnect, .test_port = bpck_test_port, .probe_unit = bpck_probe_unit, .test_proto = bpck_test_proto, .log_adapter = bpck_log_adapter, }; static int __init bpck_init(void) { return paride_register(&bpck); } static void __exit bpck_exit(void) { paride_unregister(&bpck); } MODULE_LICENSE("GPL"); module_init(bpck_init) 
module_exit(bpck_exit)
gpl-2.0
syntheticpp/linux
drivers/block/paride/bpck.c
14851
9505
/* bpck.c (c) 1996-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. bpck.c is a low-level protocol driver for the MicroSolutions "backpack" parallel port IDE adapter. */ /* Changes: 1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay 1.02 GRG 1998.08.15 default pi->delay returned to 4 */ #define BPCK_VERSION "1.02" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #undef r2 #undef w2 #define PC pi->private #define r2() (PC=(in_p(2) & 0xff)) #define w2(byte) {out_p(2,byte); PC = byte;} #define t2(pat) {PC ^= pat; out_p(2,PC);} #define e2() {PC &= 0xfe; out_p(2,PC);} #define o2() {PC |= 1; out_p(2,PC);} #define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set cont = 2 - use internal bpck register addressing */ static int cont_map[3] = { 0x40, 0x48, 0 }; static int bpck_read_regr( PIA *pi, int cont, int regr ) { int r, l, h; r = regr + cont_map[cont]; switch (pi->mode) { case 0: w0(r & 0xf); w0(r); t2(2); t2(4); l = r1(); t2(4); h = r1(); return j44(l,h); case 1: w0(r & 0xf); w0(r); t2(2); e2(); t2(0x20); t2(4); h = r0(); t2(1); t2(0x20); return h; case 2: case 3: case 4: w0(r); w2(9); w2(0); w2(0x20); h = r4(); w2(0); return h; } return -1; } static void bpck_write_regr( PIA *pi, int cont, int regr, int val ) { int r; r = regr + cont_map[cont]; switch (pi->mode) { case 0: case 1: w0(r); t2(2); w0(val); o2(); t2(4); t2(1); break; case 2: case 3: case 4: w0(r); w2(9); w2(0); w0(val); w2(1); w2(3); w2(0); break; } } /* These macros access the bpck registers in native addressing */ #define WR(r,v) bpck_write_regr(pi,2,r,v) #define RR(r) (bpck_read_regr(pi,2,r)) static void bpck_write_block( PIA *pi, char * buf, int count ) { int i; switch (pi->mode) { case 0: WR(4,0x40); w0(0x40); t2(2); t2(1); for 
(i=0;i<count;i++) { w0(buf[i]); t2(4); } WR(4,0); break; case 1: WR(4,0x50); w0(0x40); t2(2); t2(1); for (i=0;i<count;i++) { w0(buf[i]); t2(4); } WR(4,0x10); break; case 2: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count;i++) w4(buf[i]); w2(0); WR(4,8); break; case 3: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]); w2(0); WR(4,8); break; case 4: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(1); for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]); w2(0); WR(4,8); break; } } static void bpck_read_block( PIA *pi, char * buf, int count ) { int i, l, h; switch (pi->mode) { case 0: WR(4,0x40); w0(0x40); t2(2); for (i=0;i<count;i++) { t2(4); l = r1(); t2(4); h = r1(); buf[i] = j44(l,h); } WR(4,0); break; case 1: WR(4,0x50); w0(0x40); t2(2); t2(0x20); for(i=0;i<count;i++) { t2(4); buf[i] = r0(); } t2(1); t2(0x20); WR(4,0x10); break; case 2: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count;i++) buf[i] = r4(); w2(0); WR(4,8); break; case 3: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w(); w2(0); WR(4,8); break; case 4: WR(4,0x48); w0(0x40); w2(9); w2(0); w2(0x20); for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l(); w2(0); WR(4,8); break; } } static int bpck_probe_unit ( PIA *pi ) { int o1, o0, f7, id; int t, s; id = pi->unit; s = 0; w2(4); w2(0xe); r2(); t2(2); o1 = r1()&0xf8; o0 = r0(); w0(255-id); w2(4); w0(id); t2(8); t2(8); t2(8); t2(2); t = r1()&0xf8; f7 = ((id % 8) == 7); if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; } if ((t == o1) && ((!f7) || (s == o1))) { w2(0x4c); w0(o0); return 0; } t2(8); w0(0); t2(2); w2(0x4c); w0(o0); return 1; } static void bpck_connect ( PIA *pi ) { pi->saved_r0 = r0(); w0(0xff-pi->unit); w2(4); w0(pi->unit); t2(8); t2(8); t2(8); t2(2); t2(2); switch (pi->mode) { case 0: t2(8); WR(4,0); break; case 1: t2(8); WR(4,0x10); break; case 2: case 3: case 4: w2(0); WR(4,8); break; } WR(5,8); if (pi->devtype == PI_PCD) { WR(0x46,0x10); /* fiddle with ESS 
logic ??? */ WR(0x4c,0x38); WR(0x4d,0x88); WR(0x46,0xa0); WR(0x41,0); WR(0x4e,8); } } static void bpck_disconnect ( PIA *pi ) { w0(0); if (pi->mode >= 2) { w2(9); w2(0); } else t2(2); w2(0x4c); w0(pi->saved_r0); } static void bpck_force_spp ( PIA *pi ) /* This fakes the EPP protocol to turn off EPP ... */ { pi->saved_r0 = r0(); w0(0xff-pi->unit); w2(4); w0(pi->unit); t2(8); t2(8); t2(8); t2(2); t2(2); w2(0); w0(4); w2(9); w2(0); w0(0); w2(1); w2(3); w2(0); w0(0); w2(9); w2(0); w2(0x4c); w0(pi->saved_r0); } #define TEST_LEN 16 static int bpck_test_proto( PIA *pi, char * scratch, int verbose ) { int i, e, l, h, om; char buf[TEST_LEN]; bpck_force_spp(pi); switch (pi->mode) { case 0: bpck_connect(pi); WR(0x13,0x7f); w0(0x13); t2(2); for(i=0;i<TEST_LEN;i++) { t2(4); l = r1(); t2(4); h = r1(); buf[i] = j44(l,h); } bpck_disconnect(pi); break; case 1: bpck_connect(pi); WR(0x13,0x7f); w0(0x13); t2(2); t2(0x20); for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); } t2(1); t2(0x20); bpck_disconnect(pi); break; case 2: case 3: case 4: om = pi->mode; pi->mode = 0; bpck_connect(pi); WR(7,3); WR(4,8); bpck_disconnect(pi); pi->mode = om; bpck_connect(pi); w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0); switch (pi->mode) { case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4(); break; case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w(); break; case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l(); break; } w2(0); WR(7,0); bpck_disconnect(pi); break; } if (verbose) { printk("%s: bpck: 0x%x unit %d mode %d: ", pi->device,pi->port,pi->unit,pi->mode); for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]); printk("\n"); } e = 0; for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++; return e; } static void bpck_read_eeprom ( PIA *pi, char * buf ) { int i,j,k,n,p,v,f, om, od; bpck_force_spp(pi); om = pi->mode; od = pi->delay; pi->mode = 0; pi->delay = 6; bpck_connect(pi); n = 0; WR(4,0); for (i=0;i<64;i++) { WR(6,8); WR(6,0xc); p = 0x100; for (k=0;k<9;k++) { f = (((i + 0x180) & p) != 0) * 2; 
WR(6,f+0xc); WR(6,f+0xd); WR(6,f+0xc); p = (p >> 1); } for (j=0;j<2;j++) { v = 0; for (k=0;k<8;k++) { WR(6,0xc); WR(6,0xd); WR(6,0xc); f = RR(0); v = 2*v + (f == 0x84); } buf[2*i+1-j] = v; } } WR(6,8); WR(6,0); WR(5,8); bpck_disconnect(pi); if (om >= 2) { bpck_connect(pi); WR(7,3); WR(4,8); bpck_disconnect(pi); } pi->mode = om; pi->delay = od; } static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */ { int i, r, m; w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i); m = -1; if (r == i) m = 2; if (r == (255-i)) m = 0; w2(0xc); i = r0(); w0(255-i); r = r0(); w0(i); if (r != (255-i)) m = -1; if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); } if (m == 2) { w2(0x26); w2(0xc); } if (m == -1) return 0; return 5; } static void bpck_log_adapter( PIA *pi, char * scratch, int verbose ) { char *mode_string[5] = { "4-bit","8-bit","EPP-8", "EPP-16","EPP-32" }; #ifdef DUMP_EEPROM int i; #endif bpck_read_eeprom(pi,scratch); #ifdef DUMP_EEPROM if (verbose) { for(i=0;i<128;i++) if ((scratch[i] < ' ') || (scratch[i] > '~')) scratch[i] = '.'; printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch); printk("%s: %64.64s\n",pi->device,&scratch[64]); } #endif printk("%s: bpck %s, backpack %8.8s unit %d", pi->device,BPCK_VERSION,&scratch[110],pi->unit); printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port, pi->mode,mode_string[pi->mode],pi->delay); } static struct pi_protocol bpck = { .owner = THIS_MODULE, .name = "bpck", .max_mode = 5, .epp_first = 2, .default_delay = 4, .max_units = 255, .write_regr = bpck_write_regr, .read_regr = bpck_read_regr, .write_block = bpck_write_block, .read_block = bpck_read_block, .connect = bpck_connect, .disconnect = bpck_disconnect, .test_port = bpck_test_port, .probe_unit = bpck_probe_unit, .test_proto = bpck_test_proto, .log_adapter = bpck_log_adapter, }; static int __init bpck_init(void) { return paride_register(&bpck); } static void __exit bpck_exit(void) { paride_unregister(&bpck); } MODULE_LICENSE("GPL"); module_init(bpck_init) 
module_exit(bpck_exit)
gpl-2.0
dorimanx/Dorimanx-LG-G2-D802-Kernel
arch/powerpc/kernel/machine_kexec.c
4
6501
/* * Code to handle transition of Linux booting another kernel. * * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz * Copyright (C) 2005 IBM Corporation. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/kexec.h> #include <linux/reboot.h> #include <linux/threads.h> #include <linux/memblock.h> #include <linux/of.h> #include <linux/irq.h> #include <linux/ftrace.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/sections.h> void machine_kexec_mask_interrupts(void) { unsigned int i; struct irq_desc *desc; for_each_irq_desc(i, desc) { struct irq_chip *chip; chip = irq_desc_get_chip(desc); if (!chip) continue; if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) chip->irq_eoi(&desc->irq_data); if (chip->irq_mask) chip->irq_mask(&desc->irq_data); if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) chip->irq_disable(&desc->irq_data); } } void machine_crash_shutdown(struct pt_regs *regs) { default_machine_crash_shutdown(regs); } /* * Do what every setup is needed on image and the * reboot code buffer to allow us to avoid allocations * later. */ int machine_kexec_prepare(struct kimage *image) { if (ppc_md.machine_kexec_prepare) return ppc_md.machine_kexec_prepare(image); else return default_machine_kexec_prepare(image); } void machine_kexec_cleanup(struct kimage *image) { } void arch_crash_save_vmcoreinfo(void) { #ifdef CONFIG_NEED_MULTIPLE_NODES VMCOREINFO_SYMBOL(node_data); VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); #endif #ifndef CONFIG_NEED_MULTIPLE_NODES VMCOREINFO_SYMBOL(contig_page_data); #endif } /* * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. 
*/ void machine_kexec(struct kimage *image) { int save_ftrace_enabled; save_ftrace_enabled = __ftrace_enabled_save(); if (ppc_md.machine_kexec) ppc_md.machine_kexec(image); else default_machine_kexec(image); __ftrace_enabled_restore(save_ftrace_enabled); /* Fall back to normal restart if we're still alive. */ machine_restart(NULL); for(;;); } void __init reserve_crashkernel(void) { unsigned long long crash_size, crash_base; int ret; /* use common parsing */ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), &crash_size, &crash_base); if (ret == 0 && crash_size > 0) { crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; } if (crashk_res.end == crashk_res.start) { crashk_res.start = crashk_res.end = 0; return; } /* We might have got these values via the command line or the * device tree, either way sanitise them now. */ crash_size = resource_size(&crashk_res); #ifndef CONFIG_NONSTATIC_KERNEL if (crashk_res.start != KDUMP_KERNELBASE) printk("Crash kernel location must be 0x%x\n", KDUMP_KERNELBASE); crashk_res.start = KDUMP_KERNELBASE; #else if (!crashk_res.start) { #ifdef CONFIG_PPC64 /* * On 64bit we split the RMO in half but cap it at half of * a small SLB (128MB) since the crash kernel needs to place * itself and some stacks to be in the first segment. 
*/ crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); #else crashk_res.start = KDUMP_KERNELBASE; #endif } crash_base = PAGE_ALIGN(crashk_res.start); if (crash_base != crashk_res.start) { printk("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE); crashk_res.start = crash_base; } #endif crash_size = PAGE_ALIGN(crash_size); crashk_res.end = crashk_res.start + crash_size - 1; /* The crash region must not overlap the current kernel */ if (overlaps_crashkernel(__pa(_stext), _end - _stext)) { printk(KERN_WARNING "Crash kernel can not overlap current kernel\n"); crashk_res.start = crashk_res.end = 0; return; } /* Crash kernel trumps memory limit */ if (memory_limit && memory_limit <= crashk_res.end) { memory_limit = crashk_res.end + 1; printk("Adjusted memory limit for crashkernel, now 0x%llx\n", (unsigned long long)memory_limit); } printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crashk_res.start >> 20), (unsigned long)(memblock_phys_mem_size() >> 20)); memblock_reserve(crashk_res.start, crash_size); } int overlaps_crashkernel(unsigned long start, unsigned long size) { return (start + size) > crashk_res.start && start <= crashk_res.end; } /* Values we need to export to the second kernel via the device tree. 
*/ static phys_addr_t kernel_end; static phys_addr_t crashk_size; static struct property kernel_end_prop = { .name = "linux,kernel-end", .length = sizeof(phys_addr_t), .value = &kernel_end, }; static struct property crashk_base_prop = { .name = "linux,crashkernel-base", .length = sizeof(phys_addr_t), .value = &crashk_res.start, }; static struct property crashk_size_prop = { .name = "linux,crashkernel-size", .length = sizeof(phys_addr_t), .value = &crashk_size, }; static void __init export_crashk_values(struct device_node *node) { struct property *prop; /* There might be existing crash kernel properties, but we can't * be sure what's in them, so remove them. */ prop = of_find_property(node, "linux,crashkernel-base", NULL); if (prop) of_remove_property(node, prop); prop = of_find_property(node, "linux,crashkernel-size", NULL); if (prop) of_remove_property(node, prop); if (crashk_res.start != 0) { of_add_property(node, &crashk_base_prop); crashk_size = resource_size(&crashk_res); of_add_property(node, &crashk_size_prop); } /* * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ of_update_property(node, &memory_limit_prop); } static int __init kexec_setup(void) { struct device_node *node; struct property *prop; node = of_find_node_by_path("/chosen"); if (!node) return -ENOENT; /* remove any stale properties so ours can be found */ prop = of_find_property(node, kernel_end_prop.name, NULL); if (prop) of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ kernel_end = __pa(_end); of_add_property(node, &kernel_end_prop); export_crashk_values(node); of_node_put(node); return 0; } late_initcall(kexec_setup);
gpl-2.0
Intel-Corp/systemd
src/network/networkd-netdev-tunnel.c
4
21938
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ /*** This file is part of systemd. Copyright 2014 Susant Sahani systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include <arpa/inet.h> #include <net/if.h> #include <linux/ip.h> #include <linux/if_tunnel.h> #include <linux/ip6_tunnel.h> #include "sd-netlink.h" #include "networkd-netdev-tunnel.h" #include "networkd-link.h" #include "util.h" #include "missing.h" #include "conf-parser.h" #define DEFAULT_TNL_HOP_LIMIT 64 #define IP6_FLOWINFO_FLOWLABEL htonl(0x000FFFFF) static const char* const ip6tnl_mode_table[_NETDEV_IP6_TNL_MODE_MAX] = { [NETDEV_IP6_TNL_MODE_IP6IP6] = "ip6ip6", [NETDEV_IP6_TNL_MODE_IPIP6] = "ipip6", [NETDEV_IP6_TNL_MODE_ANYIP6] = "any", }; DEFINE_STRING_TABLE_LOOKUP(ip6tnl_mode, Ip6TnlMode); DEFINE_CONFIG_PARSE_ENUM(config_parse_ip6tnl_mode, ip6tnl_mode, Ip6TnlMode, "Failed to parse ip6 tunnel Mode"); static int netdev_ipip_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t = IPIP(netdev); int r; assert(netdev); assert(link); assert(m); assert(t); assert(t->family == AF_INET); r = sd_netlink_message_append_u32(m, IFLA_IPTUN_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LINK attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_IPTUN_LOCAL, &t->local.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append 
IFLA_IPTUN_LOCAL attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_IPTUN_REMOTE, &t->remote.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_REMOTE attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_IPTUN_TTL, t->ttl); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_TTL attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_IPTUN_PMTUDISC, t->pmtudisc); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_PMTUDISC attribute: %m"); return r; } static int netdev_sit_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t = SIT(netdev); int r; assert(netdev); assert(link); assert(m); assert(t); assert(t->family == AF_INET); r = sd_netlink_message_append_u32(m, IFLA_IPTUN_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LINK attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_IPTUN_LOCAL, &t->local.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LOCAL attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_IPTUN_REMOTE, &t->remote.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_REMOTE attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_IPTUN_TTL, t->ttl); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_TTL attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_IPTUN_PMTUDISC, t->pmtudisc); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_PMTUDISC attribute: %m"); return r; } static int netdev_gre_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t; int r; assert(netdev); if (netdev->kind == NETDEV_KIND_GRE) t = GRE(netdev); else t = GRETAP(netdev); assert(t); assert(t->family == AF_INET); assert(link); assert(m); r = sd_netlink_message_append_u32(m, IFLA_GRE_LINK, 
link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_LINK attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_GRE_LOCAL, &t->local.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_LOCAL attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_GRE_REMOTE, &t->remote.in); if (r < 0) log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_REMOTE attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_GRE_TTL, t->ttl); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_TTL attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_GRE_TOS, t->tos); if (r < 0) log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_TOS attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_GRE_PMTUDISC, t->pmtudisc); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_PMTUDISC attribute: %m"); return r; } static int netdev_ip6gre_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t; int r; assert(netdev); if (netdev->kind == NETDEV_KIND_IP6GRE) t = IP6GRE(netdev); else t = IP6GRETAP(netdev); assert(t); assert(t->family == AF_INET6); assert(link); assert(m); r = sd_netlink_message_append_u32(m, IFLA_GRE_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_LINK attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_GRE_LOCAL, &t->local.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_LOCAL attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_GRE_REMOTE, &t->remote.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_REMOTE attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_GRE_TTL, t->ttl); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_TTL attribute: %m"); if (t->ipv6_flowlabel != _NETDEV_IPV6_FLOWLABEL_INVALID) { r = 
sd_netlink_message_append_u32(m, IFLA_GRE_FLOWINFO, t->ipv6_flowlabel); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_FLOWINFO attribute: %m"); } r = sd_netlink_message_append_u32(m, IFLA_GRE_FLAGS, t->flags); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_GRE_FLAGS attribute: %m"); return r; } static int netdev_vti_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t = VTI(netdev); int r; assert(netdev); assert(link); assert(m); assert(t); assert(t->family == AF_INET); r = sd_netlink_message_append_u32(m, IFLA_VTI_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LINK attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_VTI_LOCAL, &t->local.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LOCAL attribute: %m"); r = sd_netlink_message_append_in_addr(m, IFLA_VTI_REMOTE, &t->remote.in); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_REMOTE attribute: %m"); return r; } static int netdev_vti6_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t = VTI6(netdev); int r; assert(netdev); assert(link); assert(m); assert(t); assert(t->family == AF_INET6); r = sd_netlink_message_append_u32(m, IFLA_VTI_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LINK attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_VTI_LOCAL, &t->local.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LOCAL attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_VTI_REMOTE, &t->remote.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_REMOTE attribute: %m"); return r; } static int netdev_ip6tnl_fill_message_create(NetDev *netdev, Link *link, sd_netlink_message *m) { Tunnel *t = IP6TNL(netdev); uint8_t proto; 
int r; assert(netdev); assert(link); assert(m); assert(t); assert(t->family == AF_INET6); r = sd_netlink_message_append_u32(m, IFLA_IPTUN_LINK, link->ifindex); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LINK attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_IPTUN_LOCAL, &t->local.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_LOCAL attribute: %m"); r = sd_netlink_message_append_in6_addr(m, IFLA_IPTUN_REMOTE, &t->remote.in6); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_REMOTE attribute: %m"); r = sd_netlink_message_append_u8(m, IFLA_IPTUN_TTL, t->ttl); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_TTL attribute: %m"); if (t->ipv6_flowlabel != _NETDEV_IPV6_FLOWLABEL_INVALID) { r = sd_netlink_message_append_u32(m, IFLA_IPTUN_FLOWINFO, t->ipv6_flowlabel); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_FLOWINFO attribute: %m"); } if (t->copy_dscp) t->flags |= IP6_TNL_F_RCV_DSCP_COPY; if (t->encap_limit != IPV6_DEFAULT_TNL_ENCAP_LIMIT) { r = sd_netlink_message_append_u8(m, IFLA_IPTUN_ENCAP_LIMIT, t->encap_limit); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_ENCAP_LIMIT attribute: %m"); } r = sd_netlink_message_append_u32(m, IFLA_IPTUN_FLAGS, t->flags); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_FLAGS attribute: %m"); switch (t->ip6tnl_mode) { case NETDEV_IP6_TNL_MODE_IP6IP6: proto = IPPROTO_IPV6; break; case NETDEV_IP6_TNL_MODE_IPIP6: proto = IPPROTO_IPIP; break; case NETDEV_IP6_TNL_MODE_ANYIP6: default: proto = 0; break; } r = sd_netlink_message_append_u8(m, IFLA_IPTUN_PROTO, proto); if (r < 0) return log_netdev_error_errno(netdev, r, "Could not append IFLA_IPTUN_MODE attribute: %m"); return r; } static int netdev_tunnel_verify(NetDev *netdev, const char *filename) { Tunnel *t = NULL; assert(netdev); 
assert(filename); switch (netdev->kind) { case NETDEV_KIND_IPIP: t = IPIP(netdev); break; case NETDEV_KIND_SIT: t = SIT(netdev); break; case NETDEV_KIND_GRE: t = GRE(netdev); break; case NETDEV_KIND_GRETAP: t = GRETAP(netdev); break; case NETDEV_KIND_IP6GRE: t = IP6GRE(netdev); break; case NETDEV_KIND_IP6GRETAP: t = IP6GRETAP(netdev); break; case NETDEV_KIND_VTI: t = VTI(netdev); break; case NETDEV_KIND_VTI6: t = VTI6(netdev); break; case NETDEV_KIND_IP6TNL: t = IP6TNL(netdev); break; default: assert_not_reached("Invalid tunnel kind"); } assert(t); if (t->remote.in.s_addr == INADDR_ANY) { log_warning("Tunnel without remote address configured in %s. Ignoring", filename); return -EINVAL; } if (t->family != AF_INET && t->family != AF_INET6) { log_warning("Tunnel with invalid address family configured in %s. Ignoring", filename); return -EINVAL; } if (netdev->kind == NETDEV_KIND_IP6TNL) { if (t->ip6tnl_mode == _NETDEV_IP6_TNL_MODE_INVALID) { log_warning("IP6 Tunnel without mode configured in %s. 
Ignoring", filename); return -EINVAL; } } return 0; } int config_parse_tunnel_address(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata) { Tunnel *t = userdata; union in_addr_union *addr = data, buffer; int r, f; assert(filename); assert(lvalue); assert(rvalue); assert(data); r = in_addr_from_string_auto(rvalue, &f, &buffer); if (r < 0) { log_syntax(unit, LOG_ERR, filename, line, r, "Tunnel address is invalid, ignoring assignment: %s", rvalue); return 0; } if (t->family != AF_UNSPEC && t->family != f) { log_syntax(unit, LOG_ERR, filename, line, 0, "Tunnel addresses incompatible, ignoring assignment: %s", rvalue); return 0; } t->family = f; *addr = buffer; return 0; } int config_parse_ipv6_flowlabel(const char* unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata) { IPv6FlowLabel *ipv6_flowlabel = data; Tunnel *t = userdata; int k = 0; int r; assert(filename); assert(lvalue); assert(rvalue); assert(ipv6_flowlabel); if (streq(rvalue, "inherit")) { *ipv6_flowlabel = IP6_FLOWINFO_FLOWLABEL; t->flags |= IP6_TNL_F_USE_ORIG_FLOWLABEL; } else { r = config_parse_int(unit, filename, line, section, section_line, lvalue, ltype, rvalue, &k, userdata); if (r < 0) return r; if (k > 0xFFFFF) log_syntax(unit, LOG_ERR, filename, line, 0, "Failed to parse IPv6 flowlabel option, ignoring: %s", rvalue); else { *ipv6_flowlabel = htonl(k) & IP6_FLOWINFO_FLOWLABEL; t->flags &= ~IP6_TNL_F_USE_ORIG_FLOWLABEL; } } return 0; } int config_parse_encap_limit(const char* unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata) { Tunnel *t = userdata; int k = 0; int r; assert(filename); assert(lvalue); assert(rvalue); if (streq(rvalue, "none")) t->flags |= 
IP6_TNL_F_IGN_ENCAP_LIMIT; else { r = safe_atoi(rvalue, &k); if (r < 0) { log_syntax(unit, LOG_ERR, filename, line, r, "Failed to parse Tunnel Encapsulation Limit option, ignoring: %s", rvalue); return 0; } if (k > 255 || k < 0) log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid Tunnel Encapsulation value, ignoring: %d", k); else { t->encap_limit = k; t->flags &= ~IP6_TNL_F_IGN_ENCAP_LIMIT; } } return 0; } static void ipip_init(NetDev *n) { Tunnel *t = IPIP(n); assert(n); assert(t); t->pmtudisc = true; } static void sit_init(NetDev *n) { Tunnel *t = SIT(n); assert(n); assert(t); t->pmtudisc = true; } static void vti_init(NetDev *n) { Tunnel *t; assert(n); if (n->kind == NETDEV_KIND_VTI) t = VTI(n); else t = VTI6(n); assert(t); t->pmtudisc = true; } static void gre_init(NetDev *n) { Tunnel *t; assert(n); if (n->kind == NETDEV_KIND_GRE) t = GRE(n); else t = GRETAP(n); assert(t); t->pmtudisc = true; } static void ip6gre_init(NetDev *n) { Tunnel *t; assert(n); if (n->kind == NETDEV_KIND_IP6GRE) t = IP6GRE(n); else t = IP6GRETAP(n); assert(t); t->ttl = DEFAULT_TNL_HOP_LIMIT; } static void ip6tnl_init(NetDev *n) { Tunnel *t = IP6TNL(n); assert(n); assert(t); t->ttl = DEFAULT_TNL_HOP_LIMIT; t->encap_limit = IPV6_DEFAULT_TNL_ENCAP_LIMIT; t->ip6tnl_mode = _NETDEV_IP6_TNL_MODE_INVALID; t->ipv6_flowlabel = _NETDEV_IPV6_FLOWLABEL_INVALID; } const NetDevVTable ipip_vtable = { .object_size = sizeof(Tunnel), .init = ipip_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_ipip_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable sit_vtable = { .object_size = sizeof(Tunnel), .init = sit_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_sit_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable vti_vtable = { .object_size = sizeof(Tunnel), .init = vti_init, .sections = "Match\0NetDev\0Tunnel\0", 
.fill_message_create = netdev_vti_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable vti6_vtable = { .object_size = sizeof(Tunnel), .init = vti_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_vti6_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable gre_vtable = { .object_size = sizeof(Tunnel), .init = gre_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_gre_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable gretap_vtable = { .object_size = sizeof(Tunnel), .init = gre_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_gre_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable ip6gre_vtable = { .object_size = sizeof(Tunnel), .init = ip6gre_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_ip6gre_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable ip6gretap_vtable = { .object_size = sizeof(Tunnel), .init = ip6gre_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_ip6gre_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, }; const NetDevVTable ip6tnl_vtable = { .object_size = sizeof(Tunnel), .init = ip6tnl_init, .sections = "Match\0NetDev\0Tunnel\0", .fill_message_create = netdev_ip6tnl_fill_message_create, .create_type = NETDEV_CREATE_STACKED, .config_verify = netdev_tunnel_verify, };
gpl-2.0
zombi-x/android_kernel_asus_tf701t
drivers/mmc/host/sdhci-pltfm.c
4
7057
/* * sdhci-pltfm.c Support for SDHCI platform devices * Copyright (c) 2009 Intel Corporation * * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc. * Copyright (c) 2009 MontaVista Software, Inc. * * Authors: Xiaobo Xie <X.Xie@freescale.com> * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: * SDHCI platform devices * * Inspired by sdhci-pci.c, by Pierre Ossman */ #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> #ifdef CONFIG_PPC #include <asm/machdep.h> #endif #include "sdhci-pltfm.h" static struct sdhci_ops sdhci_pltfm_ops = { }; #ifdef CONFIG_OF static bool sdhci_of_wp_inverted(struct device_node *np) { if (of_get_property(np, "sdhci,wp-inverted", NULL)) return true; /* Old device trees don't have the wp-inverted property. 
*/ #ifdef CONFIG_PPC return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); #else return false; #endif /* CONFIG_PPC */ } void sdhci_get_of_property(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); const __be32 *clk; int size; if (of_device_is_available(np)) { if (of_get_property(np, "sdhci,auto-cmd12", NULL)) host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; if (of_get_property(np, "sdhci,1-bit-only", NULL)) host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; if (sdhci_of_wp_inverted(np)) host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) host->quirks |= SDHCI_QUIRK_BROKEN_DMA; if (of_device_is_compatible(np, "fsl,p2020-esdhc") || of_device_is_compatible(np, "fsl,p1010-esdhc") || of_device_is_compatible(np, "fsl,mpc8536-esdhc")) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; clk = of_get_property(np, "clock-frequency", &size); if (clk && size == sizeof(*clk) && *clk) pltfm_host->clock = be32_to_cpup(clk); } } #else void sdhci_get_of_property(struct platform_device *pdev) {} #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(sdhci_get_of_property); struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, struct sdhci_pltfm_data *pdata) { struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct device_node *np = pdev->dev.of_node; struct resource *iomem; int ret; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { ret = -ENOMEM; goto err; } if (resource_size(iomem) < 0x100) dev_err(&pdev->dev, "Invalid iomem size!\n"); /* Some PCI-based MFD need the parent here */ if (pdev->dev.parent != &platform_bus && !np) host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host)); else host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host)); if (IS_ERR(host)) { ret = PTR_ERR(host); goto err; } pltfm_host = sdhci_priv(host); host->hw_name = 
dev_name(&pdev->dev); if (pdata && pdata->ops) host->ops = pdata->ops; else host->ops = &sdhci_pltfm_ops; if (pdata) { host->quirks = pdata->quirks; host->quirks2 = pdata->quirks2; } host->irq = platform_get_irq(pdev, 0); if (!request_mem_region(iomem->start, resource_size(iomem), mmc_hostname(host->mmc))) { dev_err(&pdev->dev, "cannot request region\n"); ret = -EBUSY; goto err_request; } host->ioaddr = ioremap(iomem->start, resource_size(iomem)); if (!host->ioaddr) { dev_err(&pdev->dev, "failed to remap registers\n"); ret = -ENOMEM; goto err_remap; } platform_set_drvdata(pdev, host); return host; err_remap: release_mem_region(iomem->start, resource_size(iomem)); err_request: sdhci_free_host(host); err: dev_err(&pdev->dev, "%s failed %d\n", __func__, ret); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(sdhci_pltfm_init); void sdhci_pltfm_free(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(host->ioaddr); release_mem_region(iomem->start, resource_size(iomem)); sdhci_free_host(host); platform_set_drvdata(pdev, NULL); } EXPORT_SYMBOL_GPL(sdhci_pltfm_free); int sdhci_pltfm_register(struct platform_device *pdev, struct sdhci_pltfm_data *pdata) { struct sdhci_host *host; int ret = 0; host = sdhci_pltfm_init(pdev, pdata); if (IS_ERR(host)) return PTR_ERR(host); sdhci_get_of_property(pdev); ret = sdhci_add_host(host); if (ret) sdhci_pltfm_free(pdev); return ret; } EXPORT_SYMBOL_GPL(sdhci_pltfm_register); int sdhci_pltfm_unregister(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); sdhci_remove_host(host, dead); sdhci_pltfm_free(pdev); return 0; } EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); #ifdef CONFIG_PM static int sdhci_pltfm_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); int ret; pr_info("%s: sdhci_pltfm_suspend++\n", 
mmc_hostname(host->mmc)); ret = sdhci_suspend_host(host); if (ret) { dev_err(dev, "suspend failed, error = %d\n", ret); return ret; } if (host->ops && host->ops->suspend) ret = host->ops->suspend(host); if (ret) { dev_err(dev, "suspend hook failed, error = %d\n", ret); sdhci_resume_host(host); } pr_info("%s: sdhci_pltfm_suspend--\n", mmc_hostname(host->mmc)); return ret; } static int sdhci_pltfm_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); int ret = 0; pr_info("%s: sdhci_pltfm_resume++\n", mmc_hostname(host->mmc)); if (host->ops && host->ops->resume) ret = host->ops->resume(host); if (ret) { dev_err(dev, "resume hook failed, error = %d\n", ret); return ret; } ret = sdhci_resume_host(host); if (ret) dev_err(dev, "resume failed, error = %d\n", ret); pr_info("%s: sdhci_pltfm_resume--\n", mmc_hostname(host->mmc)); return ret; } const struct dev_pm_ops sdhci_pltfm_pmops = { .suspend = sdhci_pltfm_suspend, .resume = sdhci_pltfm_resume, }; EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops); #endif /* CONFIG_PM */ static int __init sdhci_pltfm_drv_init(void) { pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n"); return 0; } module_init(sdhci_pltfm_drv_init); static void __exit sdhci_pltfm_drv_exit(void) { } module_exit(sdhci_pltfm_drv_exit); MODULE_DESCRIPTION("SDHCI platform and OF driver helper"); MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2");
gpl-2.0
johnhubbard/kgdb-pci-kernel
arch/xtensa/kernel/setup.c
4
10089
/* * arch/xtensa/kernel/setup.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2001 - 2005 Tensilica Inc. * * Chris Zankel <chris@zankel.net> * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> * Kevin Chea * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/screen_info.h> #include <linux/bootmem.h> #include <linux/kernel.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) # include <linux/console.h> #endif #ifdef CONFIG_RTC # include <linux/timex.h> #endif #ifdef CONFIG_PROC_FS # include <linux/seq_file.h> #endif #include <asm/bootparam.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/timex.h> #include <asm/platform.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/param.h> #include <platform/hardware.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16}; #endif #ifdef CONFIG_BLK_DEV_FD extern struct fd_ops no_fd_ops; struct fd_ops *fd_ops; #endif extern struct rtc_ops no_rtc_ops; struct rtc_ops *rtc_ops; #ifdef CONFIG_BLK_DEV_INITRD extern void *initrd_start; extern void *initrd_end; int initrd_is_mapped = 0; extern int initrd_below_start_ok; #endif unsigned char aux_device_present; extern unsigned long loops_per_jiffy; /* Command line specified as configuration option. */ static char __initdata command_line[COMMAND_LINE_SIZE]; #ifdef CONFIG_CMDLINE_BOOL static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; #endif sysmem_info_t __initdata sysmem; #ifdef CONFIG_MMU extern void init_mmu(void); #else static inline void init_mmu(void) { } #endif extern void zones_init(void); /* * Boot parameter parsing. 
* * The Xtensa port uses a list of variable-sized tags to pass data to * the kernel. The first tag must be a BP_TAG_FIRST tag for the list * to be recognised. The list is terminated with a zero-sized * BP_TAG_LAST tag. */ typedef struct tagtable { u32 tag; int (*parse)(const bp_tag_t*); } tagtable_t; #define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \ __attribute__((unused, __section__(".taglist"))) = { tag, fn } /* parse current tag */ static int __init parse_tag_mem(const bp_tag_t *tag) { meminfo_t *mi = (meminfo_t*)(tag->data); if (mi->type != MEMORY_TYPE_CONVENTIONAL) return -1; if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) { printk(KERN_WARNING "Ignoring memory bank 0x%08lx size %ldKB\n", (unsigned long)mi->start, (unsigned long)mi->end - (unsigned long)mi->start); return -EINVAL; } sysmem.bank[sysmem.nr_banks].type = mi->type; sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(mi->start); sysmem.bank[sysmem.nr_banks].end = mi->end & PAGE_SIZE; sysmem.nr_banks++; return 0; } __tagtable(BP_TAG_MEMORY, parse_tag_mem); #ifdef CONFIG_BLK_DEV_INITRD static int __init parse_tag_initrd(const bp_tag_t* tag) { meminfo_t* mi; mi = (meminfo_t*)(tag->data); initrd_start = (void*)(mi->start); initrd_end = (void*)(mi->end); return 0; } __tagtable(BP_TAG_INITRD, parse_tag_initrd); #endif /* CONFIG_BLK_DEV_INITRD */ static int __init parse_tag_cmdline(const bp_tag_t* tag) { strncpy(command_line, (char*)(tag->data), COMMAND_LINE_SIZE); command_line[COMMAND_LINE_SIZE - 1] = '\0'; return 0; } __tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline); static int __init parse_bootparam(const bp_tag_t* tag) { extern tagtable_t __tagtable_begin, __tagtable_end; tagtable_t *t; /* Boot parameters must start with a BP_TAG_FIRST tag. */ if (tag->id != BP_TAG_FIRST) { printk(KERN_WARNING "Invalid boot parameters!\n"); return 0; } tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size); /* Parse all tags. 
*/ while (tag != NULL && tag->id != BP_TAG_LAST) { for (t = &__tagtable_begin; t < &__tagtable_end; t++) { if (tag->id == t->tag) { t->parse(tag); break; } } if (t == &__tagtable_end) printk(KERN_WARNING "Ignoring tag " "0x%08x\n", tag->id); tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); } return 0; } /* * Initialize architecture. (Early stage) */ void __init init_arch(bp_tag_t *bp_start) { sysmem.nr_banks = 0; #ifdef CONFIG_CMDLINE_BOOL strcpy(command_line, default_command_line); #endif /* Parse boot parameters */ if (bp_start) parse_bootparam(bp_start); if (sysmem.nr_banks == 0) { sysmem.nr_banks = 1; sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START; sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE; } /* Early hook for platforms */ platform_init(bp_start); /* Initialize MMU. */ init_mmu(); } /* * Initialize system. Setup memory and reserve regions. */ extern char _end; extern char _stext; extern char _WindowVectors_text_start; extern char _WindowVectors_text_end; extern char _DebugInterruptVector_literal_start; extern char _DebugInterruptVector_text_end; extern char _KernelExceptionVector_literal_start; extern char _KernelExceptionVector_text_end; extern char _UserExceptionVector_literal_start; extern char _UserExceptionVector_text_end; extern char _DoubleExceptionVector_literal_start; extern char _DoubleExceptionVector_text_end; void __init setup_arch(char **cmdline_p) { extern int mem_reserve(unsigned long, unsigned long, int); extern void bootmem_init(void); memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; *cmdline_p = command_line; /* Reserve some memory regions */ #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start < initrd_end) { initrd_is_mapped = mem_reserve(__pa(initrd_start), __pa(initrd_end), 0); initrd_below_start_ok = 1; } else { initrd_start = 0; } #endif mem_reserve(__pa(&_stext),__pa(&_end), 1); mem_reserve(__pa(&_WindowVectors_text_start), 
__pa(&_WindowVectors_text_end), 0); mem_reserve(__pa(&_DebugInterruptVector_literal_start), __pa(&_DebugInterruptVector_text_end), 0); mem_reserve(__pa(&_KernelExceptionVector_literal_start), __pa(&_KernelExceptionVector_text_end), 0); mem_reserve(__pa(&_UserExceptionVector_literal_start), __pa(&_UserExceptionVector_text_end), 0); mem_reserve(__pa(&_DoubleExceptionVector_literal_start), __pa(&_DoubleExceptionVector_text_end), 0); bootmem_init(); platform_setup(cmdline_p); paging_init(); zones_init(); #ifdef CONFIG_VT # if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; # elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; # endif #endif #ifdef CONFIG_PCI platform_pcibios_init(); #endif } void machine_restart(char * cmd) { platform_restart(); } void machine_halt(void) { platform_halt(); while (1); } void machine_power_off(void) { platform_power_off(); while (1); } #ifdef CONFIG_PROC_FS /* * Display some core information through /proc/cpuinfo. */ static int c_show(struct seq_file *f, void *slot) { /* high-level stuff */ seq_printf(f,"processor\t: 0\n" "vendor_id\t: Tensilica\n" "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "core ID\t\t: " XCHAL_CORE_ID "\n" "build ID\t: 0x%x\n" "byte order\t: %s\n" "cpu MHz\t\t: %lu.%02lu\n" "bogomips\t: %lu.%02lu\n", XCHAL_BUILD_UNIQUE_ID, XCHAL_HAVE_BE ? 
"big" : "little", CCOUNT_PER_JIFFY/(1000000/HZ), (CCOUNT_PER_JIFFY/(10000/HZ)) % 100, loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100); seq_printf(f,"flags\t\t: " #if XCHAL_HAVE_NMI "nmi " #endif #if XCHAL_HAVE_DEBUG "debug " # if XCHAL_HAVE_OCD "ocd " # endif #endif #if XCHAL_HAVE_DENSITY "density " #endif #if XCHAL_HAVE_BOOLEANS "boolean " #endif #if XCHAL_HAVE_LOOPS "loop " #endif #if XCHAL_HAVE_NSA "nsa " #endif #if XCHAL_HAVE_MINMAX "minmax " #endif #if XCHAL_HAVE_SEXT "sext " #endif #if XCHAL_HAVE_CLAMPS "clamps " #endif #if XCHAL_HAVE_MAC16 "mac16 " #endif #if XCHAL_HAVE_MUL16 "mul16 " #endif #if XCHAL_HAVE_MUL32 "mul32 " #endif #if XCHAL_HAVE_MUL32_HIGH "mul32h " #endif #if XCHAL_HAVE_FP "fpu " #endif "\n"); /* Registers. */ seq_printf(f,"physical aregs\t: %d\n" "misc regs\t: %d\n" "ibreak\t\t: %d\n" "dbreak\t\t: %d\n", XCHAL_NUM_AREGS, XCHAL_NUM_MISC_REGS, XCHAL_NUM_IBREAK, XCHAL_NUM_DBREAK); /* Interrupt. */ seq_printf(f,"num ints\t: %d\n" "ext ints\t: %d\n" "int levels\t: %d\n" "timers\t\t: %d\n" "debug level\t: %d\n", XCHAL_NUM_INTERRUPTS, XCHAL_NUM_EXTINTERRUPTS, XCHAL_NUM_INTLEVELS, XCHAL_NUM_TIMERS, XCHAL_DEBUGLEVEL); /* Cache */ seq_printf(f,"icache line size: %d\n" "icache ways\t: %d\n" "icache size\t: %d\n" "icache flags\t: " #if XCHAL_ICACHE_LINE_LOCKABLE "lock" #endif "\n" "dcache line size: %d\n" "dcache ways\t: %d\n" "dcache size\t: %d\n" "dcache flags\t: " #if XCHAL_DCACHE_IS_WRITEBACK "writeback" #endif #if XCHAL_DCACHE_LINE_LOCKABLE "lock" #endif "\n", XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, XCHAL_ICACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, XCHAL_DCACHE_SIZE); return 0; } /* * We show only CPU #0 info. */ static void * c_start(struct seq_file *f, loff_t *pos) { return (void *) ((*pos == 0) ? 
(void *)1 : NULL); } static void * c_next(struct seq_file *f, void *v, loff_t *pos) { return NULL; } static void c_stop(struct seq_file *f, void *v) { } const struct seq_operations cpuinfo_op = { start: c_start, next: c_next, stop: c_stop, show: c_show }; #endif /* CONFIG_PROC_FS */
gpl-2.0
huz123/bricked.tenderloin
arch/arm/mach-msm/board-qsd8x50a-st1x.c
4
62076
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/android_pmem.h> #include <linux/bootmem.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/delay.h> #include <linux/mfd/tps65023.h> #include <linux/bma150.h> #include <linux/power_supply.h> #include <linux/clk.h> #include <linux/gpio_keys.h> #include <linux/input/qci_kbd.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/mach/mmc.h> #include <mach/vreg.h> #include <mach/mpp.h> #include <mach/gpio.h> #include <mach/board.h> #include <mach/sirc.h> #include <mach/dma.h> #include <mach/rpc_hsusb.h> #include <mach/msm_hsusb.h> #include <mach/msm_hsusb_hw.h> #include <mach/msm_serial_hs.h> #include <mach/msm_touchpad.h> #include <mach/msm_i2ckbd.h> #include <mach/pmic.h> #include <mach/camera.h> #include <mach/memory.h> #include <mach/msm_spi.h> #include <mach/msm_tsif.h> #include <mach/msm_battery.h> #include <mach/clk.h> #include <mach/tpm_st_i2c.h> #include <mach/rpc_server_handset.h> #include <mach/socinfo.h> #include "devices.h" #include "timer.h" #include "msm-keypad-devices.h" #include "pm.h" #include <linux/msm_kgsl.h> #include 
<linux/smsc911x.h> #ifdef CONFIG_USB_ANDROID #include <linux/usb/android_composite.h> #endif #include "smd_private.h" #define MSM_PMEM_MDP_SIZE 0x408000 #define SMEM_SPINLOCK_I2C "D:I2C02000021" #define MSM_PMEM_ADSP_SIZE 0x2A05000 #define MSM_FB_SIZE_ST15 0x810000 #define MSM_AUDIO_SIZE 0x80000 #define MSM_SMI_BASE 0xE0000000 #define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000) #define MSM_PMEM_SMI_BASE (MSM_SMI_BASE + 0x02B00000) #define MSM_PMEM_SMI_SIZE 0x01500000 #define MSM_FB_BASE MSM_PMEM_SMI_BASE #define MSM_PMEM_SMIPOOL_BASE (MSM_FB_BASE + MSM_FB_SIZE_ST15) #define MSM_PMEM_SMIPOOL_SIZE (MSM_PMEM_SMI_SIZE - MSM_FB_SIZE_ST15) #define PMEM_KERNEL_EBI1_SIZE (10 * 1024 * 1024) #define PMIC_VREG_GP6_LEVEL 2900 #define MSM_GPIO_SD_DET 100 static struct resource smsc911x_resources[] = { [0] = { .name = "smsc911x-memory", .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ, }, }; static struct smsc911x_platform_config smsc911x_config = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_32BIT, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; static struct resource smc91x_resources[] = { [0] = { .flags = IORESOURCE_MEM, }, [1] = { .flags = IORESOURCE_IRQ, }, }; static struct clk *hs_clk; static struct clk *phy_clk; #ifdef CONFIG_USB_FUNCTION static struct usb_mass_storage_platform_data usb_mass_storage_pdata = { .nluns = 0x02, .buf_size = 16384, .vendor = "GOOGLE", .product = "Mass storage", .release = 0xffff, }; static struct platform_device mass_storage_device = { .name = "usb_mass_storage", .id = -1, .dev = { .platform_data = &usb_mass_storage_pdata, }, }; #endif #ifdef CONFIG_USB_ANDROID static char *usb_functions_default[] = { "diag", "modem", "nmea", "rmnet", "usb_mass_storage", }; 
static char *usb_functions_default_adb[] = { "diag", "adb", "modem", "nmea", "rmnet", "usb_mass_storage", }; static char *usb_functions_rndis[] = { "rndis", "usb_mass_storage", }; static char *usb_functions_rndis_adb[] = { "rndis", "adb", "usb_mass_storage", }; static char *usb_functions_all[] = { #ifdef CONFIG_USB_ANDROID_RNDIS "rndis", #endif #ifdef CONFIG_USB_ANDROID_DIAG "diag", #endif "adb", #ifdef CONFIG_USB_F_SERIAL "modem", "nmea", #endif #ifdef CONFIG_USB_ANDROID_RMNET "rmnet", #endif "usb_mass_storage", #ifdef CONFIG_USB_ANDROID_ACM "acm", #endif }; static struct android_usb_product usb_products[] = { { .product_id = 0x9026, .num_functions = ARRAY_SIZE(usb_functions_default), .functions = usb_functions_default, }, { .product_id = 0x9025, .num_functions = ARRAY_SIZE(usb_functions_default_adb), .functions = usb_functions_default_adb, }, { .product_id = 0xf00e, .num_functions = ARRAY_SIZE(usb_functions_rndis), .functions = usb_functions_rndis, }, { .product_id = 0x9024, .num_functions = ARRAY_SIZE(usb_functions_rndis_adb), .functions = usb_functions_rndis_adb, }, }; static struct usb_mass_storage_platform_data mass_storage_pdata = { .nluns = 1, .vendor = "Qualcomm Incorporated", .product = "Mass storage", .release = 0x0100, }; static struct platform_device usb_mass_storage_device = { .name = "usb_mass_storage", .id = -1, .dev = { .platform_data = &mass_storage_pdata, }, }; static struct usb_ether_platform_data rndis_pdata = { /* ethaddr is filled by board_serialno_setup */ .vendorID = 0x05C6, .vendorDescr = "Qualcomm Incorporated", }; static struct platform_device rndis_device = { .name = "rndis", .id = -1, .dev = { .platform_data = &rndis_pdata, }, }; static struct android_usb_platform_data android_usb_pdata = { .vendor_id = 0x05C6, .product_id = 0x9026, .version = 0x0100, .product_name = "Qualcomm HSUSB Device", .manufacturer_name = "Qualcomm Incorporated", .num_products = ARRAY_SIZE(usb_products), .products = usb_products, .num_functions = 
ARRAY_SIZE(usb_functions_all), .functions = usb_functions_all, .serial_number = "1234567890ABCDEF", }; static struct platform_device android_usb_device = { .name = "android_usb", .id = -1, .dev = { .platform_data = &android_usb_pdata, }, }; static int __init board_serialno_setup(char *serialno) { int i; char *src = serialno; /* create a fake MAC address from our serial number. * first byte is 0x02 to signify locally administered. */ rndis_pdata.ethaddr[0] = 0x02; for (i = 0; *src; i++) { /* XOR the USB serial across the remaining bytes */ rndis_pdata.ethaddr[i % (ETH_ALEN - 1) + 1] ^= *src++; } android_usb_pdata.serial_number = serialno; return 1; } __setup("androidboot.serialno=", board_serialno_setup); #endif static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, }; #ifdef CONFIG_USB_FUNCTION static struct usb_function_map usb_functions_map[] = { {"diag", 0}, {"adb", 1}, {"modem", 2}, {"nmea", 3}, {"mass_storage", 4}, {"ethernet", 5}, }; /* dynamic composition */ static struct usb_composition usb_func_composition[] = { { .product_id = 0x9012, .functions = 0x5, /* 0101 */ }, { .product_id = 0x9013, .functions = 0x15, /* 10101 */ }, { .product_id = 0x9014, .functions = 0x30, /* 110000 */ }, { .product_id = 0x9015, .functions = 0x12, /* 10010 */ }, { .product_id = 0x9016, .functions = 0xD, /* 01101 */ }, { .product_id = 0x9017, .functions = 0x1D, /* 11101 */ }, { .product_id = 0xF000, .functions = 0x10, /* 10000 */ }, { .product_id = 0xF009, .functions = 0x20, /* 100000 */ }, { .product_id = 0x9018, .functions = 0x1F, /* 011111 */ }, { .product_id = 0x901A, .functions = 0x0F, /* 01111 */ }, }; #endif static struct msm_handset_platform_data hs_platform_data = { .hs_name = "8k_handset", .pwr_key_delay_ms = 500, /* 0 will disable end key */ }; static struct platform_device hs_device = { .name = "msm-handset", .id = -1, .dev = { .platform_data = &hs_platform_data, }, }; 
#define HUB_3V3_GPIO 38 /* Power to HUB and switch */ #define SWITCH_EN_GPIO 98 /* !CS of analog switch */ #define SWITCH_CONTROL_GPIO 104 /* 0: Host, 1: Peripheral */ #define HUB_RESET_GPIO 108 /* 0: HUB is RESET */ static struct msm_gpio hsusb_gpio_config_data[] = { { GPIO_CFG(HUB_3V3_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "hub_power" }, { GPIO_CFG(SWITCH_EN_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "swch_enable" }, { GPIO_CFG(SWITCH_CONTROL_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "swch_ctrl" }, { GPIO_CFG(HUB_RESET_GPIO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "hub_reset" }, }; static int msm_otg_gpio_init(void) { int rc; rc = msm_gpios_request_enable(hsusb_gpio_config_data, ARRAY_SIZE(hsusb_gpio_config_data)); if (rc) printk(KERN_ERR "Error gpio req for hsusb\n"); return rc; } static void msm_otg_setup_gpio(enum usb_switch_control mode) { switch (mode) { case USB_SWITCH_HOST: gpio_set_value(HUB_3V3_GPIO, 1); /* Configure analog switch as USB host. */ gpio_set_value(SWITCH_EN_GPIO, 0); gpio_set_value(SWITCH_CONTROL_GPIO, 0); /* Bring HUB out of RESET */ gpio_set_value(HUB_RESET_GPIO, 1); break; case USB_SWITCH_PERIPHERAL: /* Power-up switch */ gpio_set_value(HUB_3V3_GPIO, 1); /* Configure analog switch as USB peripheral. 
*/ gpio_set_value(SWITCH_EN_GPIO, 0); gpio_set_value(SWITCH_CONTROL_GPIO, 1); break; case USB_SWITCH_DISABLE: default: /* Disable Switch */ gpio_set_value(SWITCH_EN_GPIO, 1); gpio_set_value(HUB_RESET_GPIO, 0); /* Power-down HUB and analog switch */ gpio_set_value(HUB_3V3_GPIO, 0); } } #ifdef CONFIG_USB_FS_HOST static struct msm_gpio fsusb_config[] = { { GPIO_CFG(139, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "fs_dat" }, { GPIO_CFG(140, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "fs_se0" }, { GPIO_CFG(141, 3, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "fs_oe_n" }, }; static int fsusb_gpio_init(void) { return msm_gpios_request(fsusb_config, ARRAY_SIZE(fsusb_config)); } static void msm_fsusb_setup_gpio(unsigned int enable) { if (enable) msm_gpios_enable(fsusb_config, ARRAY_SIZE(fsusb_config)); else msm_gpios_disable(fsusb_config, ARRAY_SIZE(fsusb_config)); } #endif #define MSM_USB_BASE ((unsigned)addr) static unsigned ulpi_read(void __iomem *addr, unsigned reg) { unsigned timeout = 100000; /* initiate read operation */ writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), USB_ULPI_VIEWPORT); /* wait for completion */ while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) cpu_relax(); if (timeout == 0) { printk(KERN_ERR "ulpi_read: timeout %08x\n", readl(USB_ULPI_VIEWPORT)); return 0xffffffff; } return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT)); } static int ulpi_write(void __iomem *addr, unsigned val, unsigned reg) { unsigned timeout = 10000; /* initiate write operation */ writel(ULPI_RUN | ULPI_WRITE | ULPI_ADDR(reg) | ULPI_DATA(val), USB_ULPI_VIEWPORT); /* wait for completion */ while ((readl(USB_ULPI_VIEWPORT) & ULPI_RUN) && (--timeout)) cpu_relax(); if (timeout == 0) { printk(KERN_ERR "ulpi_write: timeout\n"); return -1; } return 0; } static void msm_hsusb_apps_reset_link(int reset) { if (reset) clk_reset(hs_clk, CLK_RESET_ASSERT); else clk_reset(hs_clk, CLK_RESET_DEASSERT); } static void msm_hsusb_apps_reset_phy(void) { clk_reset(phy_clk, 
CLK_RESET_ASSERT); msleep(1); clk_reset(phy_clk, CLK_RESET_DEASSERT); } #define ULPI_VERIFY_MAX_LOOP_COUNT 3 static int msm_hsusb_phy_verify_access(void __iomem *addr) { int temp; for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { if (ulpi_read(addr, ULPI_DEBUG) != (unsigned)-1) break; msm_hsusb_apps_reset_phy(); } if (temp == ULPI_VERIFY_MAX_LOOP_COUNT) { pr_err("%s: ulpi read failed for %d times\n", __func__, ULPI_VERIFY_MAX_LOOP_COUNT); return -1; } return 0; } static unsigned msm_hsusb_ulpi_read_with_reset(void __iomem *addr, unsigned reg) { int temp; unsigned res; for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { res = ulpi_read(addr, reg); if (res != -1) return res; msm_hsusb_apps_reset_phy(); } pr_err("%s: ulpi read failed for %d times\n", __func__, ULPI_VERIFY_MAX_LOOP_COUNT); return -1; } static int msm_hsusb_ulpi_write_with_reset(void __iomem *addr, unsigned val, unsigned reg) { int temp; int res; for (temp = 0; temp < ULPI_VERIFY_MAX_LOOP_COUNT; temp++) { res = ulpi_write(addr, val, reg); if (!res) return 0; msm_hsusb_apps_reset_phy(); } pr_err("%s: ulpi write failed for %d times\n", __func__, ULPI_VERIFY_MAX_LOOP_COUNT); return -1; } static int msm_hsusb_phy_caliberate(void __iomem *addr) { int ret; unsigned res; ret = msm_hsusb_phy_verify_access(addr); if (ret) return -ETIMEDOUT; res = msm_hsusb_ulpi_read_with_reset(addr, ULPI_FUNC_CTRL_CLR); if (res == -1) return -ETIMEDOUT; res = msm_hsusb_ulpi_write_with_reset(addr, res | ULPI_SUSPENDM, ULPI_FUNC_CTRL_CLR); if (res) return -ETIMEDOUT; msm_hsusb_apps_reset_phy(); return msm_hsusb_phy_verify_access(addr); } #define USB_LINK_RESET_TIMEOUT (msecs_to_jiffies(10)) static int msm_hsusb_native_phy_reset(void __iomem *addr) { u32 temp; unsigned long timeout; msm_hsusb_apps_reset_link(1); msm_hsusb_apps_reset_phy(); msm_hsusb_apps_reset_link(0); /* select ULPI phy */ temp = (readl(USB_PORTSC) & ~PORTSC_PTS); writel(temp | PORTSC_PTS_ULPI, USB_PORTSC); if (msm_hsusb_phy_caliberate(addr)) 
return -1; /* soft reset phy */ writel(USBCMD_RESET, USB_USBCMD); timeout = jiffies + USB_LINK_RESET_TIMEOUT; while (readl(USB_USBCMD) & USBCMD_RESET) { if (time_after(jiffies, timeout)) { pr_err("usb link reset timeout\n"); break; } msleep(1); } return 0; } static struct msm_hsusb_platform_data msm_hsusb_pdata = { #ifdef CONFIG_USB_FUNCTION .version = 0x0100, .phy_info = (USB_PHY_INTEGRATED | USB_PHY_MODEL_180NM), .vendor_id = 0x5c6, .product_name = "Qualcomm HSUSB Device", .serial_number = "1234567890ABCDEF", .manufacturer_name = "Qualcomm Incorporated", .compositions = usb_func_composition, .num_compositions = ARRAY_SIZE(usb_func_composition), .function_map = usb_functions_map, .num_functions = ARRAY_SIZE(usb_functions_map), .config_gpio = NULL, .phy_reset = msm_hsusb_native_phy_reset, #endif }; static struct vreg *vreg_usb; static void msm_hsusb_vbus_power(unsigned phy_info, int on) { switch (PHY_TYPE(phy_info)) { case USB_PHY_INTEGRATED: if (on) msm_hsusb_vbus_powerup(); else msm_hsusb_vbus_shutdown(); break; case USB_PHY_SERIAL_PMIC: if (on) vreg_enable(vreg_usb); else vreg_disable(vreg_usb); break; default: pr_err("%s: undefined phy type ( %X ) \n", __func__, phy_info); } } static struct msm_usb_host_platform_data msm_usb_host_pdata = { .phy_info = (USB_PHY_INTEGRATED | USB_PHY_MODEL_180NM), .vbus_power = msm_hsusb_vbus_power, }; #ifdef CONFIG_USB_FS_HOST static struct msm_usb_host_platform_data msm_usb_host2_pdata = { .phy_info = USB_PHY_SERIAL_PMIC, .config_gpio = msm_fsusb_setup_gpio, .vbus_power = msm_hsusb_vbus_power, }; #endif static struct android_pmem_platform_data android_pmem_kernel_ebi1_pdata = { .name = PMEM_KERNEL_EBI1_DATA_NAME, /* if no allocator_type, defaults to PMEM_ALLOCATORTYPE_BITMAP, * the only valid choice at this time. The board structure is * set to all zeros by the C runtime initialization and that is now * the enum value of PMEM_ALLOCATORTYPE_BITMAP, now forced to 0 in * include/linux/android_pmem.h. 
*/ .cached = 0, }; #ifdef CONFIG_KERNEL_PMEM_SMI_REGION static struct android_pmem_platform_data android_pmem_kernel_smi_pdata = { .name = PMEM_KERNEL_SMI_DATA_NAME, /* if no allocator_type, defaults to PMEM_ALLOCATORTYPE_BITMAP, * the only valid choice at this time. The board structure is * set to all zeros by the C runtime initialization and that is now * the enum value of PMEM_ALLOCATORTYPE_BITMAP, now forced to 0 in * include/linux/android_pmem.h. */ .cached = 0, }; #else static struct android_pmem_platform_data android_pmem_smipool_pdata = { .name = "pmem_smipool", .start = MSM_PMEM_SMIPOOL_BASE, .size = MSM_PMEM_SMIPOOL_SIZE, .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, }; #endif static struct android_pmem_platform_data android_pmem_pdata = { .name = "pmem", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 1, }; static struct android_pmem_platform_data android_pmem_adsp_pdata = { .name = "pmem_adsp", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, }; static struct platform_device android_pmem_device = { .name = "android_pmem", .id = 0, .dev = { .platform_data = &android_pmem_pdata }, }; static struct platform_device android_pmem_adsp_device = { .name = "android_pmem", .id = 1, .dev = { .platform_data = &android_pmem_adsp_pdata }, }; static struct platform_device android_pmem_kernel_ebi1_device = { .name = "android_pmem", .id = 3, .dev = { .platform_data = &android_pmem_kernel_ebi1_pdata }, }; #ifdef CONFIG_KERNEL_PMEM_SMI_REGION static struct platform_device android_pmem_kernel_smi_device = { .name = "android_pmem", .id = 4, .dev = { .platform_data = &android_pmem_kernel_smi_pdata }, }; #else static struct platform_device android_pmem_smipool_device = { .name = "android_pmem", .id = 2, .dev = { .platform_data = &android_pmem_smipool_pdata }, }; #endif static struct resource msm_fb_resources[] = { { .flags = IORESOURCE_DMA, } }; static int msm_fb_detect_panel(const char *name) { int ret; if (!strcmp(name, "lcdc_st15") || 
!strcmp(name, "hdmi_sii9022")) ret = 0; else ret = -ENODEV; return ret; } /* Only allow a small subset of machines to set the offset via FB PAN_DISPLAY */ static int msm_fb_allow_set_offset(void) { return 1; } static struct msm_fb_platform_data msm_fb_pdata = { .detect_client = msm_fb_detect_panel, .allow_set_offset = msm_fb_allow_set_offset, }; static struct platform_device msm_fb_device = { .name = "msm_fb", .id = 0, .num_resources = ARRAY_SIZE(msm_fb_resources), .resource = msm_fb_resources, .dev = { .platform_data = &msm_fb_pdata, } }; static struct msm_gpio bma_spi_gpio_config_data[] = { { GPIO_CFG(22, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "bma_irq" }, }; static int msm_bma_gpio_setup(struct device *dev) { int rc; rc = msm_gpios_request_enable(bma_spi_gpio_config_data, ARRAY_SIZE(bma_spi_gpio_config_data)); return rc; } static void msm_bma_gpio_teardown(struct device *dev) { msm_gpios_disable_free(bma_spi_gpio_config_data, ARRAY_SIZE(bma_spi_gpio_config_data)); } static struct bma150_platform_data bma_pdata = { .setup = msm_bma_gpio_setup, .teardown = msm_bma_gpio_teardown, }; static struct resource qsd_spi_resources[] = { { .name = "spi_irq_in", .start = INT_SPI_INPUT, .end = INT_SPI_INPUT, .flags = IORESOURCE_IRQ, }, { .name = "spi_irq_out", .start = INT_SPI_OUTPUT, .end = INT_SPI_OUTPUT, .flags = IORESOURCE_IRQ, }, { .name = "spi_irq_err", .start = INT_SPI_ERROR, .end = INT_SPI_ERROR, .flags = IORESOURCE_IRQ, }, { .name = "spi_base", .start = 0xA1200000, .end = 0xA1200000 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "spidm_channels", .flags = IORESOURCE_DMA, }, { .name = "spidm_crci", .flags = IORESOURCE_DMA, }, }; static struct platform_device qsd_device_spi = { .name = "spi_qsd", .id = 0, .num_resources = ARRAY_SIZE(qsd_spi_resources), .resource = qsd_spi_resources, }; static struct spi_board_info msm_spi_board_info[] __initdata = { { .modalias = "bma150", .mode = SPI_MODE_3, .irq = MSM_GPIO_TO_INT(22), .bus_num = 0, .chip_select = 0, 
.max_speed_hz = 10000000, .platform_data = &bma_pdata, }, }; #define CT_CSR_PHYS 0xA8700000 #define TCSR_SPI_MUX (ct_csr_base + 0x54) static int msm_qsd_spi_dma_config(void) { void __iomem *ct_csr_base = 0; u32 spi_mux; int ret = 0; ct_csr_base = ioremap(CT_CSR_PHYS, PAGE_SIZE); if (!ct_csr_base) { pr_err("%s: Could not remap %x\n", __func__, CT_CSR_PHYS); return -1; } spi_mux = readl(TCSR_SPI_MUX); switch (spi_mux) { case (1): qsd_spi_resources[4].start = DMOV_HSUART1_RX_CHAN; qsd_spi_resources[4].end = DMOV_HSUART1_TX_CHAN; qsd_spi_resources[5].start = DMOV_HSUART1_RX_CRCI; qsd_spi_resources[5].end = DMOV_HSUART1_TX_CRCI; break; case (2): qsd_spi_resources[4].start = DMOV_HSUART2_RX_CHAN; qsd_spi_resources[4].end = DMOV_HSUART2_TX_CHAN; qsd_spi_resources[5].start = DMOV_HSUART2_RX_CRCI; qsd_spi_resources[5].end = DMOV_HSUART2_TX_CRCI; break; case (3): qsd_spi_resources[4].start = DMOV_CE_OUT_CHAN; qsd_spi_resources[4].end = DMOV_CE_IN_CHAN; qsd_spi_resources[5].start = DMOV_CE_OUT_CRCI; qsd_spi_resources[5].end = DMOV_CE_IN_CRCI; break; default: ret = -1; } iounmap(ct_csr_base); return ret; } static struct msm_gpio qsd_spi_gpio_config_data[] = { { GPIO_CFG(17, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_clk" }, { GPIO_CFG(18, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_mosi" }, { GPIO_CFG(19, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_miso" }, { GPIO_CFG(20, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "spi_cs0" }, { GPIO_CFG(21, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), "spi_pwr" }, }; static int msm_qsd_spi_gpio_config(void) { int rc; rc = msm_gpios_request_enable(qsd_spi_gpio_config_data, ARRAY_SIZE(qsd_spi_gpio_config_data)); if (rc) return rc; /* Set direction for SPI_PWR */ gpio_direction_output(21, 1); return 0; } static void msm_qsd_spi_gpio_release(void) { msm_gpios_disable_free(qsd_spi_gpio_config_data, ARRAY_SIZE(qsd_spi_gpio_config_data)); } static struct msm_spi_platform_data qsd_spi_pdata = 
{
    .max_clock_speed = 19200000,
    .gpio_config = msm_qsd_spi_gpio_config,
    .gpio_release = msm_qsd_spi_gpio_release,
    .dma_config = msm_qsd_spi_dma_config,
};

/* Attach the SPI platform data to the QSD SPI controller device. */
static void __init msm_qsd_spi_init(void)
{
    qsd_device_spi.dev.platform_data = &qsd_spi_pdata;
}

/* PMIC-driven backlight is not supported for the Toshiba MDDI panel. */
static int mddi_toshiba_pmic_bl(int level)
{
    return -EPERM;
}

static struct msm_panel_common_pdata mddi_toshiba_pdata = {
    .pmic_backlight = mddi_toshiba_pmic_bl,
};

static struct platform_device mddi_toshiba_device = {
    .name = "mddi_toshiba",
    .id = 0,
    .dev = {
        .platform_data = &mddi_toshiba_pdata,
    }
};

/*
 * Enable (on != 0) or disable the named voltage regulator.
 * Failures are only logged; this is best-effort.
 */
static void msm_fb_vreg_config(const char *name, int on)
{
    struct vreg *vreg;
    int ret = 0;

    vreg = vreg_get(NULL, name);
    if (IS_ERR(vreg)) {
        printk(KERN_ERR "%s: vreg_get(%s) failed (%ld)\n",
            __func__, name, PTR_ERR(vreg));
        return;
    }

    ret = (on) ? vreg_enable(vreg) : vreg_disable(vreg);
    if (ret)
        printk(KERN_ERR "%s: %s(%s) failed!\n",
            __func__,
            (on) ? "vreg_enable" : "vreg_disable", name);
}

#define MDDI_RST_OUT_GPIO 100

/* Tracks the last power-save state requested, to skip redundant calls. */
static int mddi_power_save_on;

/*
 * Switch the MDDI panel power rails ("gp5", "boost") and the MSME2 low
 * power mode.  'on' is normalized to 0/1; repeated calls with the same
 * state are no-ops.
 */
static int msm_fb_mddi_power_save(int on)
{
    int ret;
    int flag_on = !!on;

    if (mddi_power_save_on == flag_on)
        return 0;
    mddi_power_save_on = flag_on;

    ret = pmic_lp_mode_control(flag_on ?
OFF_CMD : ON_CMD,
        PM_VREG_LP_MSME2_ID);
    if (ret)
        printk(KERN_ERR "%s: pmic_lp_mode_control failed!\n",
            __func__);

    msm_fb_vreg_config("gp5", flag_on);
    msm_fb_vreg_config("boost", flag_on);

    return ret;
}

/* MDDI clock must run at twice the requested rate on this target. */
static int msm_fb_mddi_sel_clk(u32 *clk_rate)
{
    *clk_rate *= 2;
    return 0;
}

static struct mddi_platform_data mddi_pdata = {
    .mddi_power_save = msm_fb_mddi_power_save,
    .mddi_sel_clk = msm_fb_mddi_sel_clk,
};

/* msme2 rail powering the HDMI bridge; set up by st15_hdmi_vreg_init(). */
static struct vreg *vreg_msme2;

/* Look up the msme2 regulator and program it to 1.2V. */
static int __init st15_hdmi_vreg_init(void)
{
    int rc;

    vreg_msme2 = vreg_get(NULL, "msme2");
    if (IS_ERR(vreg_msme2)) {
        pr_err("%s: msme2 vreg get failed (%ld)\n",
            __func__, PTR_ERR(vreg_msme2));
        return -EINVAL;
    }

    rc = vreg_set_level(vreg_msme2, 1200);
    if (rc) {
        pr_err("%s: vreg msme2 set level failed (%d)\n",
            __func__, rc);
        return rc;
    }

    return 0;
}

/* Power the HDMI bridge on/off via msme2; waits 30ms after power-off. */
static int st15_hdmi_power(int on)
{
    int rc;

    if (on)
        rc = vreg_enable(vreg_msme2);
    else {
        rc = vreg_disable(vreg_msme2);
        msleep(30);
    }
    if (rc) {
        pr_err("%s: msme2 vreg %s failed (%d)\n",
            __func__, on ? "enable" : "disable", rc);
        return rc;
    }
    return 0;
}

static unsigned int msm_fb_lcdc_get_clk(void)
{
    /* Return 192MHz(in Hz) as the AXI clock for st1x device */
    return 192000000;
}

/*
 * Sequence the LCDC/LVDS/HDMI power and reset GPIOs.  GPIO 22 is the
 * panel reset line and is pulsed low after power-up.
 */
static int msm_fb_lcdc_gpio_config(int on)
{
    if (on) {
        gpio_set_value(17, 1);
        gpio_set_value(19, 1);
        gpio_set_value(20, 1);
        gpio_set_value(22, 0);
        gpio_set_value(32, 1);
        gpio_set_value(155, 1);
        st15_hdmi_power(1);
        gpio_set_value(22, 1);
        udelay(100);
        /* reset pulse */
        gpio_set_value(22, 0);
        udelay(50);
        gpio_set_value(22, 1);
    } else {
        gpio_set_value(17, 0);
        gpio_set_value(19, 0);
        gpio_set_value(20, 0);
        gpio_set_value(22, 0);
        gpio_set_value(32, 0);
        gpio_set_value(155, 0);
        st15_hdmi_power(0);
    }

    return 0;
}

/* LCDC / HDMI control pins; claimed in msm_fb_add_devices(). */
static struct msm_gpio msm_fb_st15_gpio_config_data[] = {
    { GPIO_CFG(17, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "lcdc_en0" },
    { GPIO_CFG(19, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "dat_pwr_sv" },
    { GPIO_CFG(20, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "lvds_pwr_dn" },
    {
GPIO_CFG(22, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "lcdc_en1" },
    { GPIO_CFG(32, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "lcdc_en2" },
    { GPIO_CFG(103, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "hdmi_irq" },
    { GPIO_CFG(155, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "hdmi_3v3" },
};

static struct lcdc_platform_data lcdc_pdata = {
    .lcdc_gpio_config = msm_fb_lcdc_gpio_config,
    .lcdc_get_clk = msm_fb_lcdc_get_clk,
};

static struct msm_panel_common_pdata mdp_pdata = {
    .gpio = 98,
};

#define LID_SENSOR_GPIO 41

/* Lid switch reported as an EV_SW event; active low, wakes the device. */
static struct gpio_keys_button gpio_keys_buttons[] = {
    {
        .code = SW_LID,
        .gpio = LID_SENSOR_GPIO,
        .desc = "Lid",
        .active_low = 1,
        .type = EV_SW,
        .wakeup = 1
    },
};

static struct gpio_keys_platform_data gpio_keys_data = {
    .buttons = gpio_keys_buttons,
    .nbuttons = ARRAY_SIZE(gpio_keys_buttons),
    .rep = 0,
};

static struct platform_device msm_gpio_keys = {
    .name = "gpio-keys",
    .id = -1,
    .dev = {
        .platform_data = &gpio_keys_data,
    },
};

/* Configure the lid-sensor GPIO as a pulled-up input; log on failure. */
static void __init lid_sensor_gpio_init(void)
{
    if (gpio_tlmm_config(GPIO_CFG(LID_SENSOR_GPIO, 0, GPIO_CFG_INPUT,
            GPIO_CFG_PULL_UP, GPIO_CFG_6MA), GPIO_CFG_ENABLE)) {
        pr_err("%s: gpio_tlmm_config for gpio=%d failed",
            __func__, LID_SENSOR_GPIO);
    }
}

/*
 * Register the framebuffer sub-devices (MDP, MDDI, TV encoder) and,
 * if the HDMI rail and LCDC pins come up, the LCDC device as well.
 */
static void __init msm_fb_add_devices(void)
{
    int rc;

    msm_fb_register_device("mdp", &mdp_pdata);
    msm_fb_register_device("pmdh", &mddi_pdata);
    msm_fb_register_device("emdh", &mddi_pdata);
    msm_fb_register_device("tvenc", 0);

    rc = st15_hdmi_vreg_init();
    if (rc)
        return;

    rc = msm_gpios_request_enable(
        msm_fb_st15_gpio_config_data,
        ARRAY_SIZE(msm_fb_st15_gpio_config_data));
    if (rc) {
        printk(KERN_ERR "%s: unable to init lcdc gpios\n",
            __func__);
        return;
    }

    msm_fb_register_device("lcdc", &lcdc_pdata);
}

/* Audio DMA, AUX PCM / SDAC pin numbers and register window. */
static struct resource msm_audio_resources[] = {
    {
        .flags = IORESOURCE_DMA,
    },
    {
        .name = "aux_pcm_dout",
        .start = 68,
        .end = 68,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "aux_pcm_din",
        .start = 69,
        .end = 69,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "aux_pcm_syncout",
.start = 70,
        .end = 70,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "aux_pcm_clkin_a",
        .start = 71,
        .end = 71,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "sdac_din",
        .start = 144,
        .end = 144,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "sdac_dout",
        .start = 145,
        .end = 145,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "sdac_wsout",
        .start = 143,
        .end = 143,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "cc_i2s_clk",
        .start = 142,
        .end = 142,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "audio_master_clkout",
        .start = 146,
        .end = 146,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "audio_base_addr",
        .start = 0xa0700000,
        .end = 0xa0700000 + 4,
        .flags = IORESOURCE_MEM,
    },
};

/* Amplifier enable pins, configured (but not driven) at init time. */
static unsigned st15_audio_gpio_on[] = {
    /* enable headset amplifier */
    GPIO_CFG(48, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
    /* enable speaker amplifier */
    GPIO_CFG(39, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
};

/* AUX PCM / SDAC pin mux settings applied by audio_gpio_init(). */
static unsigned audio_gpio_on[] = {
    GPIO_CFG(68, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* PCM_DOUT */
    GPIO_CFG(69, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* PCM_DIN */
    GPIO_CFG(70, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* PCM_SYNC */
    GPIO_CFG(71, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* PCM_CLK */
    GPIO_CFG(142, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* CC_I2S_CLK */
    GPIO_CFG(143, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* SADC_WSOUT */
    GPIO_CFG(145, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* SDAC_DOUT */
    GPIO_CFG(146, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
        GPIO_CFG_2MA),  /* MA_CLK_OUT */
};

/*
 * Switch the speaker-phone path: GPIO 48 follows 'enable', while the
 * PMIC MPP 21 is always driven high here.
 * NOTE(review): the MPP output ignores 'enable' — confirm whether it
 * should be conditional on it.
 */
void q6audio_enable_spkr_phone(int enable)
{
    gpio_set_value(48, (enable != 0));
    pmic_secure_mpp_control_digital_output(
        PM_MPP_21,
        PM_MPP__DLOGIC__LVL_VDD,
        PM_MPP__DLOGIC_OUT__CTRL_HIGH);
}

/* Apply the audio pin mux table, then enable both amplifier pins. */
static void __init audio_gpio_init(void)
{
    int pin, rc;

    for (pin = 0; pin < ARRAY_SIZE(audio_gpio_on); pin++) {
        rc = gpio_tlmm_config(audio_gpio_on[pin],
            GPIO_CFG_ENABLE);
        if (rc) {
            printk(KERN_ERR
                "%s: gpio_tlmm_config(%#x)=%d\n",
                __func__,
audio_gpio_on[pin], rc);
            return;
        }
    }

    /* enable headset amplifier */
    gpio_tlmm_config(st15_audio_gpio_on[0], GPIO_CFG_ENABLE);
    /* enable speaker amplifier */
    gpio_tlmm_config(st15_audio_gpio_on[1], GPIO_CFG_ENABLE);
}

static struct platform_device msm_audio_device = {
    .name = "msm_audio",
    .id = 0,
    .num_resources = ARRAY_SIZE(msm_audio_resources),
    .resource = msm_audio_resources,
};

/* Bluetooth sleep driver wiring: host-wake on GPIO 21, ext-wake on 19. */
static struct resource bluesleep_resources[] = {
    {
        .name = "gpio_host_wake",
        .start = 21,
        .end = 21,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "gpio_ext_wake",
        .start = 19,
        .end = 19,
        .flags = IORESOURCE_IO,
    },
    {
        .name = "host_wake",
        .start = MSM_GPIO_TO_INT(21),
        .end = MSM_GPIO_TO_INT(21),
        .flags = IORESOURCE_IRQ,
    },
};

static struct platform_device msm_bluesleep_device = {
    .name = "bluesleep",
    .id = -1,
    .num_resources = ARRAY_SIZE(bluesleep_resources),
    .resource = bluesleep_resources,
};

#ifdef CONFIG_BT
static struct platform_device msm_bt_power_device = {
    .name = "bt_power",
};

/* Symbolic indices into st1_5_bt_config_power_on[]. */
enum {
    BT_SYSRST,
    BT_WAKE,
    BT_HOST_WAKE,
    BT_VDD_IO,
    BT_RFR,
    BT_CTS,
    BT_RX,
    BT_TX,
    BT_VDD_FREG
};

/* BT control and UART1DM pin mux for the ST-1.5 board. */
static struct msm_gpio st1_5_bt_config_power_on[] = {
    { GPIO_CFG(18, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "BT SYSRST" },
    { GPIO_CFG(29, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "BT WAKE" },
    { GPIO_CFG(40, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "HOST WAKE" },
    { GPIO_CFG(22, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "BT VDD_IO" },
    { GPIO_CFG(43, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "UART1DM_RFR" },
    { GPIO_CFG(44, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "UART1DM_CTS" },
    { GPIO_CFG(45, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "UART1DM_RX" },
    { GPIO_CFG(46, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "UART1DM_TX" },
};

/* bt_power callback: toggle the BT SYSRST line (GPIO 18). */
static int bluetooth_power_st_1_5(int on)
{
    gpio_set_value(18, on); /* SYSRST */

    printk(KERN_DEBUG "Bluetooth power switch: %d\n", on);

    return 0;
}

/* Hold BT in reset, mux the BT/UART pins and hook up the power switch. */
static void __init bt_power_init_st_1_5(void)
{
    int rc;
gpio_set_value(18, 0); /* SYSRST */

    rc = msm_gpios_enable(st1_5_bt_config_power_on,
        ARRAY_SIZE(st1_5_bt_config_power_on));

    if (rc < 0) {
        printk(KERN_ERR
            "%s: bt power on gpio config failed: %d\n",
            __func__, rc);
        return;
    }

    msm_bt_power_device.dev.platform_data = &bluetooth_power_st_1_5;

    printk(KERN_DEBUG "Bluetooth power switch ST-1.5: initialized\n");

    return;
}
#else
#define bt_power_init_st_1_5(x) do {} while (0)
#endif

/* 3D (Yamato) and 2D graphics core register windows and IRQs. */
static struct resource kgsl_resources[] = {
    {
        .name = "kgsl_reg_memory",
        .start = 0xA0000000,
        .end = 0xA001ffff,
        .flags = IORESOURCE_MEM,
    },
    {
        .name = "kgsl_yamato_irq",
        .start = INT_GRAPHICS,
        .end = INT_GRAPHICS,
        .flags = IORESOURCE_IRQ,
    },
    {
        .name = "kgsl_2d0_reg_memory",
        .start = 0xA1300000,
        .end = 0xA13fffff,
        .flags = IORESOURCE_MEM,
    },
    {
        .name = "kgsl_2d0_irq",
        .start = INT_GRP2D,
        .end = INT_GRP2D,
        .flags = IORESOURCE_IRQ,
    },
};

/* GPU power levels and clock names for the KGSL driver. */
static struct kgsl_platform_data kgsl_pdata = {
    .pwrlevel_2d = {
        {
            .gpu_freq = 0,
            .bus_freq = 192000000,
        },
    },
    .init_level_2d = 0,
    .num_levels_2d = 1,
    .pwrlevel_3d = {
        {
            .gpu_freq = 235000000,
            .bus_freq = 128000000,
        },
        {
            .gpu_freq = 192000000,
            .bus_freq = 0,
        },
    },
    .init_level_3d = 0,
    .num_levels_3d = 2,
    .set_grp2d_async = NULL, /*note: on 8650a async mode is the default */
    .set_grp3d_async = NULL,
    .imem_clk_name = "imem_clk",
    .grp3d_clk_name = "grp_clk",
    .grp3d_pclk_name = "grp_pclk",
#ifdef CONFIG_MSM_KGSL_2D
    .grp2d0_clk_name = "grp_2d_clk",
    .grp2d0_pclk_name = "grp_2d_pclk",
#else
    .grp2d0_clk_name = NULL,
#endif
    .idle_timeout_3d = HZ/5,
    .idle_timeout_2d = HZ/10,
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
    .pt_va_size = SZ_32M,
#else
    .pt_va_size = SZ_128M,
#endif
};

static struct platform_device msm_device_kgsl = {
    .name = "kgsl",
    .id = -1,
    .num_resources = ARRAY_SIZE(kgsl_resources),
    .resource = kgsl_resources,
    .dev = {
        .platform_data = &kgsl_pdata,
    },
};

static struct platform_device msm_device_pmic_leds = {
    .name = "pmic-leds",
    .id = -1,
};

#if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE)
/* TSIF port A pins, all inputs with pull-down, function 1. */
#define TSIF_A_SYNC GPIO_CFG(106, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_A_DATA GPIO_CFG(107, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_A_EN GPIO_CFG(108, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
#define TSIF_A_CLK GPIO_CFG(109, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)

static const struct msm_gpio tsif_gpios[] = {
    { .gpio_cfg = TSIF_A_CLK, .label = "tsif_clk", },
    { .gpio_cfg = TSIF_A_EN, .label = "tsif_en", },
    { .gpio_cfg = TSIF_A_DATA, .label = "tsif_data", },
    { .gpio_cfg = TSIF_A_SYNC, .label = "tsif_sync", },
};

static struct msm_tsif_platform_data tsif_platform_data = {
    .num_gpios = ARRAY_SIZE(tsif_gpios),
    .gpios = tsif_gpios,
};
#endif /* CONFIG_TSIF || CONFIG_TSIF_MODULE */

#ifdef CONFIG_QSD_SVS
#define TPS65023_MAX_DCDC1 1600
#else
#define TPS65023_MAX_DCDC1 CONFIG_QSD_PMIC_DEFAULT_DCDC1
#endif

/*
 * CPU voltage-scaling hook: set the TPS65023 DCDC1 output to mVolts.
 * Without SVS the voltage is fixed and only requests at or below the
 * default are allowed.
 */
static int qsd8x50_tps65023_set_dcdc1(int mVolts)
{
    int rc = 0;
#ifdef CONFIG_QSD_SVS
    rc = tps65023_set_dcdc1_level(mVolts);
    /* By default the TPS65023 will be initialized to 1.225V.
     * So we can safely switch to any frequency within this
     * voltage even if the device is not probed/ready.
     */
    if (rc == -ENODEV && mVolts <= CONFIG_QSD_PMIC_DEFAULT_DCDC1)
        rc = 0;
#else
    /* Disallow frequencies not supported in the default PMIC
     * output voltage.
*/
    if (mVolts > CONFIG_QSD_PMIC_DEFAULT_DCDC1)
        rc = -EFAULT;
#endif
    return rc;
}

/* ACPU clock scaling parameters; vdd is changed via the TPS65023. */
static struct msm_acpu_clock_platform_data qsd8x50_clock_data = {
    .acpu_switch_time_us = 20,
    .max_speed_delta_khz = 256000,
    .vdd_switch_time_us = 62,
    .max_vdd = TPS65023_MAX_DCDC1,
    .acpu_set_vdd = qsd8x50_tps65023_set_dcdc1,
};

/* QUP I2C pins in GPIO (bit-bang recovery) mode. */
static struct msm_gpio qup_i2c_gpios_io[] = {
    { GPIO_CFG(154, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "qup_scl" },
    { GPIO_CFG(156, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "qup_sda" },
};
/* QUP I2C pins muxed to the hardware controller. */
static struct msm_gpio qup_i2c_gpios_hw[] = {
    { GPIO_CFG(154, 4, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "qup_scl" },
    { GPIO_CFG(156, 3, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "qup_sda" },
};

/*
 * Switch the QUP I2C pins between hardware (config_type != 0) and GPIO
 * mode.  Only adapter 4 (the QUP bus) is handled here.
 */
static void qup_i2c_gpio_config(int adap_id, int config_type)
{
    int rc = 0;
    struct msm_gpio *qup_i2c_table;
    /* Each adapter gets 2 lines from the table */
    if (adap_id != 4)
        return;
    if (config_type)
        qup_i2c_table = qup_i2c_gpios_hw;
    else
        qup_i2c_table = qup_i2c_gpios_io;
    rc = msm_gpios_enable(qup_i2c_table, 2);
    if (rc < 0)
        printk(KERN_ERR "QUP GPIO enable failed: %d\n", rc);
}

static struct msm_i2c_platform_data qup_i2c_pdata = {
    .clk_freq = 100000,
    .clk = "qup_clk",
    .pclk = "qup_pclk",
    .msm_i2c_config_gpio = qup_i2c_gpio_config,
};

/* Reserve the QUP I2C pins and attach the platform data. */
static void __init qup_device_i2c_init(void)
{
    if (msm_gpios_request(qup_i2c_gpios_hw, ARRAY_SIZE(qup_i2c_gpios_hw)))
        pr_err("failed to request I2C gpios\n");

    qup_device_i2c.dev.platform_data = &qup_i2c_pdata;
}

#define TPM_ACCEPT_CMD_GPIO 85
#define TPM_DATA_AVAIL_GPIO 84

/* TPM handshake lines, both plain inputs. */
static struct msm_gpio tpm_gpio_config[] = {
    { GPIO_CFG(TPM_ACCEPT_CMD_GPIO, 0, GPIO_CFG_INPUT,
        GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "tpm_accept_cmd" },
    { GPIO_CFG(TPM_DATA_AVAIL_GPIO, 0, GPIO_CFG_INPUT,
        GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "tpm_data_avail" },
};

/* Release the TPM handshake GPIOs. */
static void tpm_st_i2c_gpio_release(void)
{
    msm_gpios_disable_free(tpm_gpio_config,
        ARRAY_SIZE(tpm_gpio_config));
}

/* Claim and enable the TPM handshake GPIOs. */
static int tpm_st_i2c_gpio_setup(void)
{
    return
msm_gpios_request_enable(tpm_gpio_config,
        ARRAY_SIZE(tpm_gpio_config));
}

static struct tpm_st_i2c_platform_data tpm_st_i2c_data = {
    .accept_cmd_gpio = TPM_ACCEPT_CMD_GPIO,
    .data_avail_gpio = TPM_DATA_AVAIL_GPIO,
    .accept_cmd_irq = MSM_GPIO_TO_INT(TPM_ACCEPT_CMD_GPIO),
    .data_avail_irq = MSM_GPIO_TO_INT(TPM_DATA_AVAIL_GPIO),
    .gpio_setup = tpm_st_i2c_gpio_setup,
    .gpio_release = tpm_st_i2c_gpio_release,
};

static int hdmi_sii9022_cable_detect(int insert)
{
    /* turn off on-board & external VGA
     * when HDMI is plugged in */
    if (insert) {
        gpio_set_value(32, 0);
        gpio_set_value(19, 0);
    } else {
        gpio_set_value(32, 1);
        gpio_set_value(19, 1);
    }

    return 0;
}

static struct msm_hdmi_platform_data hdmi_sii9022_i2c_data = {
    .irq = MSM_GPIO_TO_INT(103),
    .cable_detect = hdmi_sii9022_cable_detect,
};

static struct qci_kbd_platform_data qci_i2ckbd_pdata = {
#ifdef CONFIG_KEYBOARD_QCIKBD_REPEAT
    .repeat = true,
#endif
};

/* Devices on the legacy I2C bus (adapter 0). */
static struct i2c_board_info msm_i2c_st1_5_info[] __initdata = {
    {
        I2C_BOARD_INFO("qci-i2ckbd", 0x18),
        .irq = 37,
        .platform_data = &qci_i2ckbd_pdata,
    },
    {
        I2C_BOARD_INFO("qci-i2cpad", 0x19),
        .irq = 35,
    },
    {
        I2C_BOARD_INFO("qci-i2cec", 0x1A),
        .irq = 42,
    },
    {
        I2C_BOARD_INFO("hmc5843", 0x1E),
    },
    {
        I2C_BOARD_INFO("bma150", 0x38),
    },
    {
        I2C_BOARD_INFO("isl29011", 0x44),
    },
    {
        I2C_BOARD_INFO("tps65023", 0x48),
    },
};

/* Devices on the QUP I2C bus (adapter 4). */
static struct i2c_board_info msm_qup_st1_5_info[] __initdata = {
    {
        I2C_BOARD_INFO("sii9022", 0x72 >> 1),
        .platform_data = &hdmi_sii9022_i2c_data,
    },
    {
        I2C_BOARD_INFO("tpm_st_i2c", 0x13),
        .platform_data = &tpm_st_i2c_data,
    },
};

static u32 msm_calculate_batt_capacity(u32 current_voltage);

/* Battery characteristics; capacity is linear between min and max mV. */
static struct msm_psy_batt_pdata msm_psy_batt_data = {
    .voltage_min_design = 3200,
    .voltage_max_design = 4200,
    .avail_chg_sources = AC_CHG | USB_CHG ,
    .batt_technology = POWER_SUPPLY_TECHNOLOGY_LION,
    .calculate_capacity = &msm_calculate_batt_capacity,
};

static u32 msm_calculate_batt_capacity(u32 current_voltage)
{
    u32 low_voltage = msm_psy_batt_data.voltage_min_design;
    u32 high_voltage
= msm_psy_batt_data.voltage_max_design; return (current_voltage - low_voltage) * 100 / (high_voltage - low_voltage); } static struct platform_device msm_batt_device = { .name = "msm-battery", .id = -1, .dev.platform_data = &msm_psy_batt_data, }; static int hsusb_rpc_connect(int connect) { if (connect) return msm_hsusb_rpc_connect(); else return msm_hsusb_rpc_close(); } static struct vreg *vreg_3p3; static int msm_hsusb_ldo_init(int init) { if (init) { vreg_3p3 = vreg_get(NULL, "usb"); if (IS_ERR(vreg_3p3)) return PTR_ERR(vreg_3p3); vreg_set_level(vreg_3p3, 3300); } else vreg_put(vreg_3p3); return 0; } static int msm_hsusb_ldo_enable(int enable) { static int ldo_status; if (!vreg_3p3 || IS_ERR(vreg_3p3)) return -ENODEV; if (ldo_status == enable) return 0; ldo_status = enable; if (enable) return vreg_enable(vreg_3p3); return vreg_disable(vreg_3p3); } static struct msm_otg_platform_data msm_otg_pdata = { .rpc_connect = hsusb_rpc_connect, .phy_reset = msm_hsusb_native_phy_reset, .setup_gpio = msm_otg_setup_gpio, .otg_mode = OTG_USER_CONTROL, .usb_mode = USB_PERIPHERAL_MODE, .vbus_power = msm_hsusb_vbus_power, .chg_vbus_draw = hsusb_chg_vbus_draw, .chg_connected = hsusb_chg_connected, .chg_init = hsusb_chg_init, .ldo_enable = msm_hsusb_ldo_enable, .ldo_init = msm_hsusb_ldo_init, .pclk_src_name = "ebi1_usb_clk", }; static struct msm_hsusb_gadget_platform_data msm_gadget_pdata; static struct platform_device *devices[] __initdata = { &msm_fb_device, &mddi_toshiba_device, &smc91x_device, &msm_device_smd, &msm_device_dmov, &android_pmem_kernel_ebi1_device, #ifdef CONFIG_KERNEL_PMEM_SMI_REGION &android_pmem_kernel_smi_device, #else &android_pmem_smipool_device, #endif &android_pmem_device, &android_pmem_adsp_device, &msm_device_nand, &msm_device_i2c, &qup_device_i2c, &qsd_device_spi, &msm_device_hsusb_peripheral, &msm_device_gadget_peripheral, #ifdef CONFIG_USB_FUNCTION &mass_storage_device, #endif #ifdef CONFIG_USB_ANDROID &usb_mass_storage_device, &rndis_device, #ifdef 
CONFIG_USB_ANDROID_DIAG &usb_diag_device, #endif &android_usb_device, #endif &msm_device_otg, &msm_device_tssc, &msm_audio_device, &msm_device_uart_dm1, &msm_bluesleep_device, #ifdef CONFIG_BT &msm_bt_power_device, #endif #if !defined(CONFIG_MSM_SERIAL_DEBUGGER) &msm_device_uart3, #endif &msm_device_pmic_leds, &msm_device_kgsl, &hs_device, #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) &msm_device_tsif, #endif #ifdef CONFIG_MT9T013 &msm_camera_sensor_mt9t013, #endif #ifdef CONFIG_MT9D112 &msm_camera_sensor_mt9d112, #endif #ifdef CONFIG_S5K3E2FX &msm_camera_sensor_s5k3e2fx, #endif #ifdef CONFIG_MT9P012 &msm_camera_sensor_mt9p012, #endif #ifdef CONFIG_MT9P012_KM &msm_camera_sensor_mt9p012_km, #endif &msm_batt_device, &msm_gpio_keys, }; static void __init qsd8x50_init_irq(void) { msm_init_irq(); msm_init_sirc(); } static void __init qsd8x50_init_host(void) { vreg_usb = vreg_get(NULL, "boost"); if (IS_ERR(vreg_usb)) { printk(KERN_ERR "%s: vreg get failed (%ld)\n", __func__, PTR_ERR(vreg_usb)); return; } platform_device_register(&msm_device_hsusb_otg); if (msm_otg_gpio_init()) return; msm_add_host(0, &msm_usb_host_pdata); #ifdef CONFIG_USB_FS_HOST if (fsusb_gpio_init()) return; msm_add_host(1, &msm_usb_host2_pdata); #endif } #if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC4_SUPPORT)) struct sdcc_gpio { struct msm_gpio *cfg_data; uint32_t size; struct msm_gpio *sleep_cfg_data; }; static struct msm_gpio sdc1_cfg_data[] = { {GPIO_CFG(51, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_3"}, {GPIO_CFG(52, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_2"}, {GPIO_CFG(53, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_1"}, {GPIO_CFG(54, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_0"}, {GPIO_CFG(55, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_cmd"}, {GPIO_CFG(56, 1, GPIO_CFG_OUTPUT, 
GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "sdc1_clk"},
};

/* SDC1 pins parked as plain inputs while the slot is powered down. */
static struct msm_gpio sdc1_sleep_cfg_data[] = {
    {GPIO_CFG(51, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_dat_3"},
    {GPIO_CFG(52, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_dat_2"},
    {GPIO_CFG(53, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_dat_1"},
    {GPIO_CFG(54, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_dat_0"},
    {GPIO_CFG(55, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_cmd"},
    {GPIO_CFG(56, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),
        "sdc1_clk"},
};

static struct msm_gpio sdc2_cfg_data[] = {
    {GPIO_CFG(62, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA),
        "sdc2_clk"},
    {GPIO_CFG(63, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc2_cmd"},
    {GPIO_CFG(64, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc2_dat_3"},
    {GPIO_CFG(65, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc2_dat_2"},
    {GPIO_CFG(66, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc2_dat_1"},
    {GPIO_CFG(67, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc2_dat_0"},
};

static struct msm_gpio sdc3_cfg_data[] = {
    {GPIO_CFG(88, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
        "sdc3_clk"},
    {GPIO_CFG(89, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_cmd"},
    {GPIO_CFG(90, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_3"},
    {GPIO_CFG(91, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_2"},
    {GPIO_CFG(92, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_1"},
    {GPIO_CFG(93, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_0"},
#ifdef CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT
    {GPIO_CFG(158, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_4"},
    {GPIO_CFG(159, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_5"},
    {GPIO_CFG(160, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc3_dat_6"},
    {GPIO_CFG(161, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP,
GPIO_CFG_8MA),
        "sdc3_dat_7"},
#endif
};

static struct msm_gpio sdc4_cfg_data[] = {
    {GPIO_CFG(142, 3, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA),
        "sdc4_clk"},
    {GPIO_CFG(143, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc4_cmd"},
    {GPIO_CFG(144, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc4_dat_0"},
    {GPIO_CFG(145, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc4_dat_1"},
    {GPIO_CFG(146, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc4_dat_2"},
    {GPIO_CFG(147, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA),
        "sdc4_dat_3"},
};

/* Indexed by (slot number - 1); only SDC1 has a sleep configuration. */
static struct sdcc_gpio sdcc_cfg_data[] = {
    {
        .cfg_data = sdc1_cfg_data,
        .size = ARRAY_SIZE(sdc1_cfg_data),
        .sleep_cfg_data = sdc1_sleep_cfg_data,
    },
    {
        .cfg_data = sdc2_cfg_data,
        .size = ARRAY_SIZE(sdc2_cfg_data),
    },
    {
        .cfg_data = sdc3_cfg_data,
        .size = ARRAY_SIZE(sdc3_cfg_data),
    },
    {
        .cfg_data = sdc4_cfg_data,
        .size = ARRAY_SIZE(sdc4_cfg_data),
    },
};

/* Bitmaps of slots with the vreg enabled / GPIOs configured. */
static unsigned long vreg_sts, gpio_sts;
static struct vreg *vreg_mmc;
static struct vreg *vreg_movi;

/*
 * Apply or remove the pin configuration for SDCC slot 'dev_id' (1-based).
 * gpio_sts tracks state so repeated calls are no-ops; on disable, SDC1
 * falls back to its sleep configuration.
 */
static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
{
    int rc = 0;
    struct sdcc_gpio *curr;

    curr = &sdcc_cfg_data[dev_id - 1];

    if (!(test_bit(dev_id, &gpio_sts)^enable))
        return;

    if (enable) {
        set_bit(dev_id, &gpio_sts);
        rc = msm_gpios_request_enable(curr->cfg_data, curr->size);
        if (rc)
            printk(KERN_ERR
                "%s: Failed to turn on GPIOs for slot %d\n",
                __func__, dev_id);
    } else {
        clear_bit(dev_id, &gpio_sts);
        if (curr->sleep_cfg_data) {
            msm_gpios_enable(curr->sleep_cfg_data, curr->size);
            msm_gpios_free(curr->sleep_cfg_data, curr->size);
            return;
        }
        msm_gpios_disable_free(curr->cfg_data, curr->size);
    }
}

/*
 * MMC core power callback: gate the slot GPIOs and the shared vreg.
 * vreg_sts tracks which slots need power; the vreg is only disabled
 * once no slot needs it.
 */
static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
{
    int rc = 0;
    struct platform_device *pdev;

    pdev = container_of(dv, struct platform_device, dev);
    msm_sdcc_setup_gpio(pdev->id, !!vdd);

    if (vdd == 0) {
        if (!vreg_sts)
            return 0;

        clear_bit(pdev->id, &vreg_sts);

        if (!vreg_sts) {
            rc = vreg_disable(vreg_mmc);
            if (rc)
printk(KERN_ERR "%s: return val: %d \n",
                    __func__, rc);
        }
        return 0;
    }

    if (!vreg_sts) {
        rc = vreg_set_level(vreg_mmc, PMIC_VREG_GP6_LEVEL);
        if (!rc)
            rc = vreg_enable(vreg_mmc);
        if (rc)
            printk(KERN_ERR "%s: return val: %d \n",
                    __func__, rc);
    }
    set_bit(pdev->id, &vreg_sts);
    return 0;
}

/* Write-protect switch is not wired up on this board. */
static int msm_sdcc_get_wpswitch(struct device *dv)
{
    return -1;
}

#if defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
/* Card-detect line is active low: present => 1. */
static unsigned int st1_5_sdcc_slot_status(struct device *dev)
{
    return (unsigned int)
        !(gpio_get_value(MSM_GPIO_SD_DET));
}
#endif

#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
static struct mmc_platform_data qsd8x50_sdc1_data = {
    .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
    .translate_vdd = msm_sdcc_setup_power,
    .mmc_bus_width = MMC_CAP_4_BIT_DATA,
    .wpswitch = msm_sdcc_get_wpswitch,
#ifdef CONFIG_MMC_MSM_SDC1_DUMMY52_REQUIRED
    .dummy52_required = 1,
#endif
#if defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
    .status = st1_5_sdcc_slot_status,
    .status_irq = MSM_GPIO_TO_INT(MSM_GPIO_SD_DET),
    .irq_flags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
#endif
    .msmsdcc_fmin = 144000,
    .msmsdcc_fmid = 25000000,
    .msmsdcc_fmax = 40000000,
    .nonremovable = 0,
};
#endif

#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
static struct mmc_platform_data qsd8x50_sdc2_data = {
    .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
    .translate_vdd = msm_sdcc_setup_power,
    .mmc_bus_width = MMC_CAP_4_BIT_DATA,
    .wpswitch = msm_sdcc_get_wpswitch,
#ifdef CONFIG_MMC_MSM_SDC2_DUMMY52_REQUIRED
    .dummy52_required = 1,
#endif
    .msmsdcc_fmin = 144000,
    .msmsdcc_fmid = 25000000,
    .msmsdcc_fmax = 40000000,
    .nonremovable = 0,
};
#endif

#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
/* SDC3 is the non-removable (eMMC/moviNAND) slot. */
static struct mmc_platform_data qsd8x50_sdc3_data = {
    .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
    .translate_vdd = msm_sdcc_setup_power,
#ifdef CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT
    .mmc_bus_width = MMC_CAP_8_BIT_DATA,
#else
    .mmc_bus_width = MMC_CAP_4_BIT_DATA,
#endif
#ifdef CONFIG_MMC_MSM_SDC3_DUMMY52_REQUIRED
    .dummy52_required = 1,
#endif
    .msmsdcc_fmin = 144000,
    .msmsdcc_fmid = 25000000,
.msmsdcc_fmax = 49152000,
    .nonremovable = 1,
};
#endif

#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
static struct mmc_platform_data qsd8x50_sdc4_data = {
    .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
    .translate_vdd = msm_sdcc_setup_power,
    .mmc_bus_width = MMC_CAP_4_BIT_DATA,
    .wpswitch = msm_sdcc_get_wpswitch,
#ifdef CONFIG_MMC_MSM_SDC4_DUMMY52_REQUIRED
    .dummy52_required = 1,
#endif
    .msmsdcc_fmin = 144000,
    .msmsdcc_fmid = 25000000,
    .msmsdcc_fmax = 49152000,
    .nonremovable = 0,
};
#endif

/*
 * Register the enabled SDCC slots.  The shared "wlan" vreg is fetched
 * (twice — see the TODO about the BT conflict) and left disabled; power
 * is applied per-slot via msm_sdcc_setup_power().
 */
static void __init qsd8x50_init_mmc(void)
{
    int rc;
    vreg_mmc = vreg_get(NULL, "wlan");

    if (IS_ERR(vreg_mmc)) {
        printk(KERN_ERR "%s: vreg get failed (%ld)\n",
            __func__, PTR_ERR(vreg_mmc));
        return;
    }

    rc = vreg_disable(vreg_mmc);
    if (rc)
        printk(KERN_ERR "%s: vreg_disable(vreg_mmc) returned %d\n",
            __func__, rc);

    /* TODO: Conflicts with BT. */
    vreg_movi = vreg_get(NULL, "wlan");

    if (IS_ERR(vreg_movi)) {
        printk(KERN_ERR "%s: vreg_get(mmc) failed (%ld)\n",
            __func__, PTR_ERR(vreg_movi));
        return;
    }

    rc = vreg_disable(vreg_movi);
    if (rc)
        printk(KERN_ERR "%s: vreg_disable(vreg_movi) returned %d\n",
            __func__, rc);

    mdelay(100);

#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
    msm_add_sdcc(1, &qsd8x50_sdc1_data);
#if defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
    if (!gpio_request(MSM_GPIO_SD_DET, "sd-det")) {
        if (gpio_tlmm_config(GPIO_CFG(MSM_GPIO_SD_DET, 0,
                GPIO_CFG_INPUT, GPIO_CFG_NO_PULL,
                GPIO_CFG_2MA), GPIO_CFG_ENABLE))
            pr_err("Failed to configure GPIO %d\n",
                MSM_GPIO_SD_DET);
    } else
        pr_err("Failed to request GPIO%d\n", MSM_GPIO_SD_DET);
#endif
#endif

#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
    msm_add_sdcc(2, &qsd8x50_sdc2_data);
#endif
#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
    msm_add_sdcc(3, &qsd8x50_sdc3_data);
#endif
#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
    msm_add_sdcc(4, &qsd8x50_sdc4_data);
#endif
}
#endif

/*
 * Configure the SMSC911x ethernet controller: set its MMIO window and
 * IRQ resources, claim the clock-enable and IRQ GPIOs, then register
 * the platform device.
 */
static int __init qsd8x50_cfg_smsc911x(void)
{
    int rc = 0;
    u8 enet_clk_en_gpio = 33;
    u8 irq_gpio = 105;

    smsc911x_resources[0].start = 0x9C000000;
    smsc911x_resources[0].end = 0x9C0002ff;
    smsc911x_resources[1].start =
MSM_GPIO_TO_INT(irq_gpio); smsc911x_resources[1].end = MSM_GPIO_TO_INT(irq_gpio); rc = gpio_request(enet_clk_en_gpio, "smsc911x_enet_clk_en"); if (rc) { pr_err("Failed to request GPIO pin %d (rc=%d)\n", enet_clk_en_gpio, rc); goto err; } rc = gpio_request(irq_gpio, "smsc911x_irq"); if (rc) { pr_err("Failed to request GPIO pin %d (rc=%d)\n", irq_gpio, rc); goto err; } rc = gpio_tlmm_config(GPIO_CFG(irq_gpio, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "smsc911x: Could not configure IRQ gpio\n"); goto err; } gpio_direction_output(irq_gpio, 1); gpio_direction_input(irq_gpio); rc = gpio_tlmm_config(GPIO_CFG(enet_clk_en_gpio, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "smsc911x: Could not configure ENET_CLK_EN gpio\n"); goto err; } gpio_direction_output(enet_clk_en_gpio, 1); platform_device_register(&smsc911x_device); err: gpio_free(enet_clk_en_gpio); gpio_free(irq_gpio); return -ENODEV; } static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = { [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].supported = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].suspend_enabled = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].idle_enabled = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 8594, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].residency = 23740, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].supported = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].suspend_enabled = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].idle_enabled = 1, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 4594, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].residency = 23740, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].supported = 1, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].suspend_enabled = 1, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].idle_enabled = 0, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 443, 
[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].residency = 1098,

    [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].supported = 1,
    [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].suspend_enabled = 1,
    [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].idle_enabled = 1,
    [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency = 2,
    [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].residency = 0,
};

/*
 * Switch the legacy I2C pins between hardware mode (config_type != 0,
 * function 1) and plain GPIO outputs.  iface selects the auxiliary
 * (60/61) or primary (95/96) pin pair.
 */
static void msm_i2c_gpio_config(int iface, int config_type)
{
    int gpio_scl;
    int gpio_sda;
    if (iface) {
        gpio_scl = 60;
        gpio_sda = 61;
    } else {
        gpio_scl = 95;
        gpio_sda = 96;
    }
    if (config_type) {
        gpio_tlmm_config(GPIO_CFG(gpio_scl, 1, GPIO_CFG_INPUT,
            GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE);
        gpio_tlmm_config(GPIO_CFG(gpio_sda, 1, GPIO_CFG_INPUT,
            GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE);
    } else {
        gpio_tlmm_config(GPIO_CFG(gpio_scl, 0, GPIO_CFG_OUTPUT,
            GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE);
        gpio_tlmm_config(GPIO_CFG(gpio_sda, 0, GPIO_CFG_OUTPUT,
            GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE);
    }
}

static struct msm_i2c_platform_data msm_i2c_pdata = {
    .clk_freq = 100000,
    .rsl_id = SMEM_SPINLOCK_I2C,
    .pri_clk = 95,
    .pri_dat = 96,
    .aux_clk = 60,
    .aux_dat = 61,
    .msm_i2c_config_gpio = msm_i2c_gpio_config,
};

/* Reserve the I2C pins, pick the PM latency budget, attach pdata. */
static void __init msm_device_i2c_init(void)
{
    if (gpio_request(95, "i2c_pri_clk"))
        pr_err("failed to request gpio i2c_pri_clk\n");
    if (gpio_request(96, "i2c_pri_dat"))
        pr_err("failed to request gpio i2c_pri_dat\n");
    if (gpio_request(60, "i2c_sec_clk"))
        pr_err("failed to request gpio i2c_sec_clk\n");
    if (gpio_request(61, "i2c_sec_dat"))
        pr_err("failed to request gpio i2c_sec_dat\n");

    msm_i2c_pdata.rmutex = 1;
    msm_i2c_pdata.pm_lat =
        msm_pm_data[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
        .latency;
    msm_device_i2c.dev.platform_data = &msm_i2c_pdata;
}

/* Boot-time override: pmem_kernel_ebi1_size=<bytes>. */
static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE;
static int __init pmem_kernel_ebi1_size_setup(char *p)
{
    pmem_kernel_ebi1_size = memparse(p, NULL);
    return 0;
}
early_param("pmem_kernel_ebi1_size", pmem_kernel_ebi1_size_setup);
#ifdef CONFIG_KERNEL_PMEM_SMI_REGION static unsigned pmem_kernel_smi_size = MSM_PMEM_SMIPOOL_SIZE; static int __init pmem_kernel_smi_size_setup(char *p) { pmem_kernel_smi_size = memparse(p, NULL); /* Make sure that we don't allow more SMI memory then is available - the kernel mapping code has no way of knowing if it has gone over the edge */ if (pmem_kernel_smi_size > MSM_PMEM_SMIPOOL_SIZE) pmem_kernel_smi_size = MSM_PMEM_SMIPOOL_SIZE; return 0; } early_param("pmem_kernel_smi_size", pmem_kernel_smi_size_setup); #endif static unsigned pmem_mdp_size = MSM_PMEM_MDP_SIZE; static int __init pmem_mdp_size_setup(char *p) { pmem_mdp_size = memparse(p, NULL); return 0; } early_param("pmem_mdp_size", pmem_mdp_size_setup); static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE; static int __init pmem_adsp_size_setup(char *p) { pmem_adsp_size = memparse(p, NULL); return 0; } early_param("pmem_adsp_size", pmem_adsp_size_setup); static unsigned audio_size = MSM_AUDIO_SIZE; static int __init audio_size_setup(char *p) { audio_size = memparse(p, NULL); return 0; } early_param("audio_size", audio_size_setup); static void __init qsd8x50_init(void) { if (socinfo_init() < 0) printk(KERN_ERR "%s: socinfo_init() failed!\n", __func__); msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50); hs_clk = clk_get(NULL, "usb_hs_clk"); if (IS_ERR(hs_clk)) printk(KERN_ERR "%s: hs_clk get failed!\n", __func__); phy_clk = clk_get(NULL, "usb_phy_clk"); if (IS_ERR(phy_clk)) printk(KERN_ERR "%s: phy_clk get failed!\n", __func__); qsd8x50_cfg_smsc911x(); msm_acpu_clock_init(&qsd8x50_clock_data); msm_hsusb_pdata.swfi_latency = msm_pm_data [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency; msm_device_hsusb_peripheral.dev.platform_data = &msm_hsusb_pdata; msm_device_otg.dev.platform_data = &msm_otg_pdata; msm_device_gadget_peripheral.dev.platform_data = &msm_gadget_pdata; #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) msm_device_tsif.dev.platform_data = &tsif_platform_data; #endif 
platform_add_devices(devices, ARRAY_SIZE(devices)); msm_fb_add_devices(); qsd8x50_init_host(); #if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC4_SUPPORT)) qsd8x50_init_mmc(); #endif bt_power_init_st_1_5(); audio_gpio_init(); msm_device_i2c_init(); lid_sensor_gpio_init(); msm_qsd_spi_init(); qup_device_i2c_init(); i2c_register_board_info(0, msm_i2c_st1_5_info, ARRAY_SIZE(msm_i2c_st1_5_info)); i2c_register_board_info(4, msm_qup_st1_5_info, ARRAY_SIZE(msm_qup_st1_5_info)); spi_register_board_info(msm_spi_board_info, ARRAY_SIZE(msm_spi_board_info)); msm_pm_set_platform_data(msm_pm_data, ARRAY_SIZE(msm_pm_data)); #ifdef CONFIG_SURF_FFA_GPIO_KEYPAD platform_device_register(&keypad_device_surf); #endif } static void __init qsd8x50_allocate_memory_regions(void) { void *addr; unsigned long size; size = pmem_kernel_ebi1_size; if (size) { addr = alloc_bootmem_aligned(size, 0x100000); android_pmem_kernel_ebi1_pdata.start = __pa(addr); android_pmem_kernel_ebi1_pdata.size = size; pr_info("allocating %lu bytes at %p (%lx physical) for kernel" " ebi1 pmem arena\n", size, addr, __pa(addr)); } #ifdef CONFIG_KERNEL_PMEM_SMI_REGION size = pmem_kernel_smi_size; if (size > MSM_PMEM_SMIPOOL_SIZE) { printk(KERN_ERR "pmem kernel smi arena size %lu is too big\n", size); size = MSM_PMEM_SMIPOOL_SIZE; } android_pmem_kernel_smi_pdata.start = MSM_PMEM_SMIPOOL_BASE; android_pmem_kernel_smi_pdata.size = size; pr_info("allocating %lu bytes at %lx (%lx physical)" "for pmem kernel smi arena\n", size, (long unsigned int) MSM_PMEM_SMIPOOL_BASE, __pa(MSM_PMEM_SMIPOOL_BASE)); #endif size = pmem_mdp_size; if (size) { addr = alloc_bootmem(size); android_pmem_pdata.start = __pa(addr); android_pmem_pdata.size = size; pr_info("allocating %lu bytes at %p (%lx physical) for mdp " "pmem arena\n", size, addr, __pa(addr)); } size = pmem_adsp_size; if (size) { addr = alloc_bootmem(size); 
android_pmem_adsp_pdata.start = __pa(addr); android_pmem_adsp_pdata.size = size; pr_info("allocating %lu bytes at %p (%lx physical) for adsp " "pmem arena\n", size, addr, __pa(addr)); } size = MSM_FB_SIZE_ST15; addr = (void *)MSM_FB_BASE; msm_fb_resources[0].start = (unsigned long)addr; msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("using %lu bytes of SMI at %lx physical for fb\n", size, (unsigned long)addr); size = audio_size ? : MSM_AUDIO_SIZE; addr = alloc_bootmem(size); msm_audio_resources[0].start = __pa(addr); msm_audio_resources[0].end = msm_audio_resources[0].start + size - 1; pr_info("allocating %lu bytes at %p (%lx physical) for audio\n", size, addr, __pa(addr)); } static void __init qsd8x50_map_io(void) { msm_shared_ram_phys = MSM_SHARED_RAM_PHYS; msm_map_qsd8x50_io(); qsd8x50_allocate_memory_regions(); } MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5") #ifdef CONFIG_MSM_DEBUG_UART .phys_io = MSM_DEBUG_UART_PHYS, .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc, #endif .boot_params = PHYS_OFFSET + 0x100, .map_io = qsd8x50_map_io, .init_irq = qsd8x50_init_irq, .init_machine = qsd8x50_init, .timer = &msm_timer, MACHINE_END
gpl-2.0
nslu2/linux-2.4.x
arch/sh64/lib/io.c
4
4656
/* * Copyright (C) 2000 David J. Mckay (david.mckay@st.com) * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * * This file contains the I/O routines for use on the overdrive board * */ #include <linux/config.h> #include <linux/types.h> #include <linux/delay.h> #include <asm/system.h> #include <asm/processor.h> #include <asm/io.h> #ifdef CONFIG_SH_CAYMAN #include <asm/cayman.h> #endif /* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the SuperH architecture, we just read/write the * memory location directly. */ #define dprintk(x...) //#define io_addr(x) (((unsigned)(x) & 0x000fffff)| PCI_ST50_IO_ADDR ) #ifdef CONFIG_SH_CAYMAN extern unsigned long smsc_virt; extern unsigned long pciio_virt; #define io_addr(x) ( ((x)<0x400) ? \ (((x) << 2)|smsc_virt) : \ ((unsigned long)(x)+pciio_virt) ) #else #define io_addr(x) ((unsigned long)(x)+pciio_virt) #endif unsigned long inb(unsigned long port) { unsigned long r; r = ctrl_inb(io_addr(port)); dprintk("inb(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); return r; } unsigned long inw(unsigned long port) { unsigned long r; r = ctrl_inw(io_addr(port)); dprintk("inw(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); return r; } unsigned long inl(unsigned long port) { unsigned long r; r = ctrl_inl(io_addr(port)); dprintk("inl(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); return r; } void outb(unsigned long value, unsigned long port) { dprintk("outb(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); ctrl_outb(value, io_addr(port)); } void outw(unsigned long value, unsigned long port) { dprintk("outw(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); ctrl_outw(value, io_addr(port)); } void outl(unsigned long value, unsigned long port) { dprintk("outw(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); ctrl_outl(value, io_addr(port)); } /* This is horrible at the 
moment - needs more work to do something sensible */ #define IO_DELAY() #define OUT_DELAY(x,type) \ void out##x##_p(unsigned type value,unsigned long port){out##x(value,port);IO_DELAY();} #define IN_DELAY(x,type) \ unsigned type in##x##_p(unsigned long port) {unsigned type tmp=in##x(port);IO_DELAY();return tmp;} #if 1 OUT_DELAY(b, long) OUT_DELAY(w, long) OUT_DELAY(l, long) IN_DELAY(b, long) IN_DELAY(w, long) IN_DELAY(l, long) #endif /* Now for the string version of these functions */ void outsb(unsigned long port, const void *addr, unsigned long count) { int i; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p++) { outb(*p, port); } } void insb(unsigned long port, void *addr, unsigned long count) { int i; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p++) { *p = inb(port); } } /* For the 16 and 32 bit string functions, we have to worry about alignment. * The SH does not do unaligned accesses, so we have to read as bytes and * then write as a word or dword. 
* This can be optimised a lot more, especially in the case where the data * is aligned */ void outsw(unsigned long port, const void *addr, unsigned long count) { int i; unsigned short tmp; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p += 2) { tmp = (*p) | ((*(p + 1)) << 8); outw(tmp, port); } } void insw(unsigned long port, void *addr, unsigned long count) { int i; unsigned short tmp; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p += 2) { tmp = inw(port); p[0] = tmp & 0xff; p[1] = (tmp >> 8) & 0xff; } } void outsl(unsigned long port, const void *addr, unsigned long count) { int i; unsigned tmp; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p += 4) { tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) | ((*(p + 3)) << 24); outl(tmp, port); } } void insl(unsigned long port, void *addr, unsigned long count) { int i; unsigned tmp; unsigned char *p = (unsigned char *) addr; for (i = 0; i < count; i++, p += 4) { tmp = inl(port); p[0] = tmp & 0xff; p[1] = (tmp >> 8) & 0xff; p[2] = (tmp >> 16) & 0xff; p[3] = (tmp >> 24) & 0xff; } } void memcpy_toio(unsigned long to, const void *from, long count) { unsigned char *p = (unsigned char *) from; while (count) { count--; writeb(*p++, to++); } } void memcpy_fromio(void *to, unsigned long from, long count) { int i; unsigned char *p = (unsigned char *) to; for (i = 0; i < count; i++) { p[i] = readb(from); from++; } }
gpl-2.0
dulton/tcpmp-revive
common/helper_video.c
4
15687
/***************************************************************************** * * This program is free software ; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * $Id: helper_video.c 548 2006-01-08 22:41:57Z picard $ * * The Core Pocket Media Player * Copyright (c) 2004-2005 Gabor Kovacs * ****************************************************************************/ #include "common.h" bool_t EqSubtitle(const subtitle* a, const subtitle* b) { return a->FourCC == b->FourCC; } bool_t EqAudio(const audio* a, const audio* b) { return a->Bits == b->Bits && a->Channels == b->Channels && a->SampleRate == b->SampleRate && a->FracBits == b->FracBits && a->Flags == b->Flags && a->Format == b->Format; } bool_t EqFrac(const fraction* a, const fraction* b) { if (a->Den == b->Den && a->Num == b->Num) return 1; if (!a->Den) return b->Den==0; if (!b->Den) return 0; return (int64_t)b->Den * a->Num == (int64_t)a->Den * b->Num; } int BitMaskSize(uint32_t Mask) { int i; for (i=0;Mask;++i) Mask &= Mask - 1; return i; } int BitMaskPos(uint32_t Mask) { int i; for (i=0;Mask && !(Mask&1);++i) Mask >>= 1; return i; } int ScaleRound(int v,int Num,int Den) { int64_t i; if (!Den) return 0; i = (int64_t)v * Num; if (i<0) i-=Den/2; else i+=Den/2; i/=Den; return (int)i; } void FillColor(uint8_t* Dst,int DstPitch,int x,int y,int Width,int Height,int BPP,int Value) { if (Width>0 && 
Height>0) { uint16_t *p16,*p16e; uint32_t *p32,*p32e; uint8_t* End; Dst += y*DstPitch + (x*BPP)/8; End = Dst + Height * DstPitch; do { switch (BPP) { case 1: Value &= 1; memset(Dst,Value * 0xFF,Width >> 3); break; case 2: Value &= 3; memset(Dst,Value * 0x55,Width >> 2); break; case 4: Value &= 15; memset(Dst,Value * 0x11,Width >> 1); break; case 8: memset(Dst,Value,Width); break; case 16: p16 = (uint16_t*)Dst; p16e = p16+Width; for (;p16!=p16e;++p16) *p16 = (uint16_t)Value; break; case 32: p32 = (uint32_t*)Dst; p32e = p32+Width; for (;p32!=p32e;++p32) *p32 = Value; break; } Dst += DstPitch; } while (Dst != End); } } bool_t EqBlitFX(const blitfx* a, const blitfx* b) { return a->Flags == b->Flags && a->Contrast == b->Contrast && a->Saturation == b->Saturation && a->Brightness == b->Brightness && a->Direction == b->Direction && a->RGBAdjust[0] == b->RGBAdjust[0] && a->RGBAdjust[1] == b->RGBAdjust[1] && a->RGBAdjust[2] == b->RGBAdjust[2] && a->ScaleX == b->ScaleX && a->ScaleY == b->ScaleY; } int CombineDir(int Src, int Blit, int Dst) { //order of transformations // SrcMirror // SrcSwap // BlitSwap // BlitMirror // DstSwap // DstMirror //should be combined to a single // Swap // Mirror if (Dst & DIR_SWAPXY) { if (Blit & DIR_MIRRORLEFTRIGHT) Dst ^= DIR_MIRRORUPDOWN; if (Blit & DIR_MIRRORUPDOWN) Dst ^= DIR_MIRRORLEFTRIGHT; } else Dst ^= Blit & (DIR_MIRRORUPDOWN|DIR_MIRRORLEFTRIGHT); Dst ^= Blit & DIR_SWAPXY; Dst ^= Src & DIR_SWAPXY; if (Dst & DIR_SWAPXY) { if (Src & DIR_MIRRORLEFTRIGHT) Dst ^= DIR_MIRRORUPDOWN; if (Src & DIR_MIRRORUPDOWN) Dst ^= DIR_MIRRORLEFTRIGHT; } else Dst ^= Src & (DIR_MIRRORUPDOWN|DIR_MIRRORLEFTRIGHT); return Dst; } int SurfaceRotate(const video* SrcFormat, const video* DstFormat, const planes Src, planes Dst, int Dir) { blitfx FX; memset(&FX,0,sizeof(FX)); FX.ScaleX = SCALE_ONE; FX.ScaleY = SCALE_ONE; FX.Direction = Dir; return SurfaceCopy(SrcFormat,DstFormat,Src,Dst,&FX); } int SurfaceCopy(const video* SrcFormat, const video* DstFormat, const 
planes Src, planes Dst, const blitfx* FX) { void* Blit; rect SrcRect; rect DstRect; VirtToPhy(NULL,&SrcRect,SrcFormat); VirtToPhy(NULL,&DstRect,DstFormat); Blit = BlitCreate(DstFormat,SrcFormat,FX,NULL); if (!Blit) return ERR_NOT_SUPPORTED; BlitAlign(Blit,&DstRect,&SrcRect); BlitImage(Blit,Dst,*(const constplanes*)Src,NULL,-1,-1); BlitRelease(Blit); return ERR_NONE; } int SurfaceAlloc(planes Ptr, const video* p) { int i; for (i=0;i<MAXPLANES;++i) Ptr[i] = NULL; if (p->Pixel.Flags & (PF_YUV420|PF_YUV422|PF_YUV444|PF_YUV410)) { int x,y,s; PlanarYUV(&p->Pixel,&x,&y,&s); Ptr[0] = Alloc16(p->Height * p->Pitch); Ptr[1] = Alloc16((p->Height>>y) * (p->Pitch>>s)); Ptr[2] = Alloc16((p->Height>>y) * (p->Pitch>>s)); if (!Ptr[0] || !Ptr[1] || !Ptr[2]) { SurfaceFree(Ptr); return ERR_OUT_OF_MEMORY; } return ERR_NONE; } Ptr[0] = Alloc16(GetImageSize(p)); return Ptr[0] ? ERR_NONE : ERR_OUT_OF_MEMORY; } void SurfaceFree(planes p) { int i; for (i=0;i<MAXPLANES;++i) { Free16(p[i]); p[i] = NULL; } } int DefaultAspect(int Width,int Height) { return ASPECT_ONE; //todo? 
} void DefaultPitch(video* p) { p->Pitch = p->Width*GetBPP(&p->Pixel); if (p->Pixel.Flags & PF_RGB) p->Pitch = ((p->Pitch+31)>>5)*4; // dword align else p->Pitch = (p->Pitch+7)>>3; // byte align } void DefaultRGB(pixel* p, int BitCount, int RBits, int GBits, int BBits, int RGaps, int GGaps, int BGaps) { p->Flags = PF_RGB; p->BitCount = BitCount; p->BitMask[0] = ((1<<RBits)-1) << (RGaps+GBits+GGaps+BBits+BGaps); p->BitMask[1] = ((1<<GBits)-1) << (GGaps+BBits+BGaps); p->BitMask[2] = ((1<<BBits)-1) << (BGaps); } bool_t Compressed(const pixel* Fmt) { return (Fmt->Flags & PF_FOURCC) && !AnyYUV(Fmt); } bool_t PlanarYUV(const pixel* Fmt, int* x, int* y,int *s) { if (PlanarYUV420(Fmt)) { if (x) *x = 1; if (y) *y = 1; if (s) { if (Fmt->Flags & PF_FOURCC && ((Fmt->FourCC == FOURCC_IMC2) || (Fmt->FourCC == FOURCC_IMC4))) *s = 0; // interleaved uv scanlines else *s = 1; } return 1; } if (PlanarYUV422(Fmt)) { if (x) *x = 1; if (s) *s = 1; if (y) *y = 0; return 1; } if (PlanarYUV444(Fmt)) { if (x) *x = 0; if (y) *y = 0; if (s) *s = 0; return 1; } if (PlanarYUV410(Fmt)) { if (x) *x = 2; if (s) *s = 2; if (y) *y = 2; return 1; } if (x) *x = 0; if (y) *y = 0; if (s) *s = 0; return 0; } typedef struct rgbfourcc { uint32_t FourCC; int BitCount; uint32_t BitMask[3]; } rgbfourcc; static const rgbfourcc RGBFourCC[] = { { FOURCC_RGB32, 32, { 0xFF0000, 0xFF00, 0xFF }}, { FOURCC_RGB24, 24, { 0xFF0000, 0xFF00, 0xFF }}, { FOURCC_RGB16, 16, { 0xF800, 0x07E0, 0x001F }}, { FOURCC_RGB15, 16, { 0x7C00, 0x03E0, 0x001F }}, { FOURCC_BGR32, 32, { 0xFF, 0xFF00, 0xFF0000 }}, { FOURCC_BGR24, 24, { 0xFF, 0xFF00, 0xFF0000 }}, { FOURCC_BGR16, 16, { 0x001F, 0x07E0, 0xF800 }}, { FOURCC_BGR15, 16, { 0x001F, 0x03E0, 0x7C00 }}, {0}, }; uint32_t DefFourCC(const pixel* Fmt) { uint32_t FourCC=0; if (Fmt->Flags & PF_YUV420) return FOURCC_I420; if (Fmt->Flags & PF_YUV422) return FOURCC_YV16; if (Fmt->Flags & PF_YUV410) return FOURCC_YUV9; if (Fmt->Flags & PF_FOURCC) { FourCC = Fmt->FourCC; if (FourCC == FOURCC_YVU9) 
FourCC = FOURCC_YUV9; if (FourCC == FOURCC_IYUV || FourCC == FOURCC_YV12) FourCC = FOURCC_I420; if (FourCC == FOURCC_YUNV || FourCC == FOURCC_V422 || FourCC == FOURCC_YUYV) FourCC = FOURCC_YUY2; if (FourCC == FOURCC_Y422 || FourCC == FOURCC_UYNV) FourCC = FOURCC_UYVY; } else if (Fmt->Flags & PF_RGB) { const rgbfourcc *i; for (i=RGBFourCC;i->FourCC;++i) if (i->BitCount == Fmt->BitCount && i->BitMask[0] == Fmt->BitMask[0] && i->BitMask[1] == Fmt->BitMask[1] && i->BitMask[2] == Fmt->BitMask[2]) { FourCC = i->FourCC; break; } } return FourCC; } bool_t PlanarYUV420(const pixel* Fmt) { if (Fmt->Flags & PF_YUV420) return 1; return (Fmt->Flags & PF_FOURCC) && ((Fmt->FourCC == FOURCC_YV12) || (Fmt->FourCC == FOURCC_IYUV) || (Fmt->FourCC == FOURCC_I420) || (Fmt->FourCC == FOURCC_IMC2) || (Fmt->FourCC == FOURCC_IMC4)); } bool_t PlanarYUV410(const pixel* Fmt) { if (Fmt->Flags & PF_YUV410) return 1; return (Fmt->Flags & PF_FOURCC) && ((Fmt->FourCC == FOURCC_YVU9) || (Fmt->FourCC == FOURCC_YUV9)); } bool_t PlanarYUV422(const pixel* Fmt) { if (Fmt->Flags & PF_YUV422) return 1; return (Fmt->Flags & PF_FOURCC) && (Fmt->FourCC == FOURCC_YV16); } bool_t PlanarYUV444(const pixel* Fmt) { return (Fmt->Flags & PF_YUV444) != 0; } bool_t PackedYUV(const pixel* Fmt) { return (Fmt->Flags & PF_FOURCC) && ((Fmt->FourCC == FOURCC_YUY2) || (Fmt->FourCC == FOURCC_YUNV) || (Fmt->FourCC == FOURCC_V422) || (Fmt->FourCC == FOURCC_YUYV) || (Fmt->FourCC == FOURCC_VYUY) || (Fmt->FourCC == FOURCC_UYVY) || (Fmt->FourCC == FOURCC_Y422) || (Fmt->FourCC == FOURCC_YVYU) || (Fmt->FourCC == FOURCC_UYNV)); } bool_t AnyYUV(const pixel* Fmt) { return PlanarYUV420(Fmt) || PlanarYUV410(Fmt) || PlanarYUV422(Fmt) || PlanarYUV444(Fmt) || PackedYUV(Fmt); } uint32_t RGBToFormat(rgbval_t RGB, const pixel* Fmt) { uint32_t v; int R,G,B; int Y,U,V; int Pos[3]; R = (INT32LE(RGB) >> 0) & 255; G = (INT32LE(RGB) >> 8) & 255; B = (INT32LE(RGB) >> 16) & 255; if (AnyYUV(Fmt)) { Y = ((2105 * R) + (4128 * G) + (802 * B))/0x2000 + 16; 
V = ((3596 * R) - (3015 * G) - (582 * B))/0x2000 + 128; U = (-(1212 * R) - (2384 * G) + (3596 * B))/0x2000 + 128; if (Fmt->Flags & PF_INVERTED) { Y ^= 255; U ^= 255; V ^= 255; } v = (Fmt->BitMask[0] / 255) * Y; v += (Fmt->BitMask[1] / 255) * U; v += (Fmt->BitMask[2] / 255) * V; } else { if (Fmt->Flags & PF_INVERTED) { R ^= 255; G ^= 255; B ^= 255; } Pos[0] = BitMaskPos(Fmt->BitMask[0]) + BitMaskSize(Fmt->BitMask[0]); Pos[1] = BitMaskPos(Fmt->BitMask[1]) + BitMaskSize(Fmt->BitMask[1]); Pos[2] = BitMaskPos(Fmt->BitMask[2]) + BitMaskSize(Fmt->BitMask[2]); v = ((R << Pos[0]) & (Fmt->BitMask[0] << 8)) | ((G << Pos[1]) & (Fmt->BitMask[1] << 8)) | ((B << Pos[2]) & (Fmt->BitMask[2] << 8)); v >>= 8; } return v; } void FillInfo(pixel* Fmt) { Fmt->BitCount = GetBPP(Fmt); if (PlanarYUV(Fmt,NULL,NULL,NULL)) { if (Fmt->Flags & (PF_YUV420|PF_YUV422|PF_YUV444|PF_YUV410)) { Fmt->BitMask[0] = 0x000000FF; Fmt->BitMask[1] = 0x0000FF00; Fmt->BitMask[2] = 0x00FF0000; } else switch (Fmt->FourCC) { case FOURCC_IMC4: case FOURCC_I420: case FOURCC_IYUV: case FOURCC_YUV9: Fmt->BitMask[0] = 0x000000FF; Fmt->BitMask[1] = 0x0000FF00; Fmt->BitMask[2] = 0x00FF0000; break; case FOURCC_IMC2: case FOURCC_YV16: case FOURCC_YV12: case FOURCC_YVU9: Fmt->BitMask[0] = 0x000000FF; Fmt->BitMask[1] = 0x00FF0000; Fmt->BitMask[2] = 0x0000FF00; break; } } else if (PackedYUV(Fmt)) switch (Fmt->FourCC) { case FOURCC_YUY2: case FOURCC_YUNV: case FOURCC_V422: case FOURCC_YUYV: Fmt->BitMask[0] = 0x00FF00FF; Fmt->BitMask[1] = 0x0000FF00; Fmt->BitMask[2] = 0xFF000000; break; case FOURCC_YVYU: Fmt->BitMask[0] = 0x00FF00FF; Fmt->BitMask[1] = 0xFF000000; Fmt->BitMask[2] = 0x0000FF00; break; case FOURCC_UYVY: case FOURCC_Y422: case FOURCC_UYNV: Fmt->BitMask[0] = 0xFF00FF00; Fmt->BitMask[1] = 0x000000FF; Fmt->BitMask[2] = 0x00FF0000; break; } } int GetImageSize(const video* p) { int Size = p->Pitch * p->Height; if (PlanarYUV420(&p->Pixel)) Size = (Size*3)/2; //1:0.25:0.25 else if (PlanarYUV422(&p->Pixel)) Size *= 2; 
//1:0.5:0.5 else if (PlanarYUV444(&p->Pixel)) Size *= 3; //1:1:1 return Size; } int GetBPP(const pixel* Fmt) { if (Fmt->Flags & (PF_RGB | PF_PALETTE)) return Fmt->BitCount; if (PlanarYUV(Fmt,NULL,NULL,NULL)) return 8; if (PackedYUV(Fmt)) return 16; return 0; } bool_t EqPoint(const point* a, const point* b) { return a->x==b->x && a->y==b->y; } bool_t EqRect(const rect* a, const rect* b) { return a->x==b->x && a->y==b->y && a->Width==b->Width && a->Height==b->Height; } bool_t EqPixel(const pixel* a, const pixel* b) { if (a->Flags != b->Flags) return 0; if ((a->Flags & PF_PALETTE) && a->BitCount != b->BitCount) return 0; if ((a->Flags & PF_RGB) && (a->BitCount != b->BitCount || a->BitMask[0] != b->BitMask[0] || a->BitMask[1] != b->BitMask[1] || a->BitMask[2] != b->BitMask[2])) return 0; if ((a->Flags & PF_FOURCC) && a->FourCC != b->FourCC) return 0; return 1; } bool_t EqVideo(const video* a, const video* b) { // no direction check here! return a->Width == b->Width && a->Height == b->Height && a->Pitch == b->Pitch && EqPixel(&a->Pixel,&b->Pixel); } void ClipRectPhy(rect* Physical, const video* p) { if (Physical->x < 0) { Physical->Width += Physical->x; Physical->x = 0; } if (Physical->y < 0) { Physical->Height += Physical->y; Physical->y = 0; } if (Physical->x + Physical->Width > p->Width) { Physical->Width = p->Width - Physical->x; if (Physical->Width < 0) { Physical->Width = 0; Physical->x = 0; } } if (Physical->y + Physical->Height > p->Height) { Physical->Height = p->Height - Physical->y; if (Physical->Height < 0) { Physical->Height = 0; Physical->y = 0; } } } void VirtToPhy(const rect* Virtual, rect* Physical, const video* p) { if (Virtual) { *Physical = *Virtual; if (p->Pixel.Flags & PF_PIXELDOUBLE) { Physical->x >>= 1; Physical->y >>= 1; Physical->Width >>= 1; Physical->Height >>= 1; } if (p->Direction & DIR_SWAPXY) SwapRect(Physical); if (p->Direction & DIR_MIRRORLEFTRIGHT) Physical->x = p->Width - Physical->x - Physical->Width; if (p->Direction & 
DIR_MIRRORUPDOWN) Physical->y = p->Height - Physical->y - Physical->Height; ClipRectPhy(Physical,p); } else { Physical->x = 0; Physical->y = 0; Physical->Width = p->Width; Physical->Height = p->Height; } } void PhyToVirt(const rect* Physical, rect* Virtual, const video* p) { if (Physical) *Virtual = *Physical; else { Virtual->x = 0; Virtual->y = 0; Virtual->Width = p->Width; Virtual->Height = p->Height; } if (p->Direction & DIR_MIRRORLEFTRIGHT) Virtual->x = p->Width - Virtual->x - Virtual->Width; if (p->Direction & DIR_MIRRORUPDOWN) Virtual->y = p->Height - Virtual->y - Virtual->Height; if (p->Direction & DIR_SWAPXY) SwapRect(Virtual); if (p->Pixel.Flags & PF_PIXELDOUBLE) { Virtual->x <<= 1; Virtual->y <<= 1; Virtual->Width <<= 1; Virtual->Height <<= 1; } }
gpl-2.0
tsoliman/scummvm
engines/bladerunner/script/ai/mutant1.cpp
4
18550
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #include "bladerunner/script/ai_script.h" namespace BladeRunner { AIScriptMutant1::AIScriptMutant1(BladeRunnerEngine *vm) : AIScriptBase(vm) { _resumeIdleAfterFramesetCompletesFlag = false; } void AIScriptMutant1::Initialize() { _animationFrame = 0; _animationState = 0; _animationStateNext = 0; _animationNext = 0; _resumeIdleAfterFramesetCompletesFlag = false; Actor_Put_In_Set(kActorMutant1, kSetFreeSlotG); Actor_Set_At_Waypoint(kActorMutant1, 39, 0); Actor_Set_Goal_Number(kActorMutant1, 400); } bool AIScriptMutant1::Update() { if (Global_Variable_Query(kVariableChapter) == 4) { switch (Actor_Query_Goal_Number(kActorMutant1)) { case 400: if (!Game_Flag_Query(kFlagMutantsActive) && Game_Flag_Query(kFlagUG06Chapter4Started) ) { Actor_Set_Goal_Number(kActorMutant1, 401); Actor_Set_Goal_Number(kActorMutant2, 401); Actor_Set_Goal_Number(kActorMutant3, 401); Actor_Set_Targetable(kActorMutant1, true); Actor_Set_Targetable(kActorMutant2, true); Actor_Set_Targetable(kActorMutant3, true); Game_Flag_Set(kFlagMutantsActive); } break; case 401: if 
(Actor_Query_Which_Set_In(kActorMutant1) == Player_Query_Current_Set() && (Actor_Query_Friendliness_To_Other(kActorMutant1, kActorMcCoy) < 30 || Actor_Query_Combat_Aggressiveness(kActorMutant1) >= 60 ) ) { Actor_Set_Goal_Number(kActorMutant1, 410); } break; case 404: if (!Game_Flag_Query(kFlagMutantsPaused)) { Actor_Set_Goal_Number(kActorMutant1, 403); } break; case 410: if (Actor_Query_Which_Set_In(kActorMutant1) != Player_Query_Current_Set()) { Non_Player_Actor_Combat_Mode_Off(kActorMutant1); Actor_Set_Goal_Number(kActorMutant1, 403); } break; case 599: if (Actor_Query_Which_Set_In(kActorMutant1) != Player_Query_Current_Set()) { Actor_Set_Goal_Number(kActorMutant1, 411); } break; } if (Game_Flag_Query(kFlagMutantsPaused) && Actor_Query_Goal_Number(kActorMutant1) != 599 ) { Actor_Set_Goal_Number(kActorMutant1, 404); } } else if (Global_Variable_Query(kVariableChapter) == 5 && Actor_Query_Goal_Number(kActorMutant1) != 590 ) { if (Actor_Query_Which_Set_In(kActorMutant1) != Player_Query_Current_Set()) { Actor_Set_Goal_Number(kActorMutant1, 590); } } return false; } void AIScriptMutant1::TimerExpired(int timer) { //return false; } void AIScriptMutant1::CompletedMovementTrack() { if (Actor_Query_Goal_Number(kActorMutant1) == 401) { Actor_Set_Goal_Number(kActorMutant1, 403); } } void AIScriptMutant1::ReceivedClue(int clueId, int fromActorId) { //return false; } void AIScriptMutant1::ClickedByPlayer() { //return false; } void AIScriptMutant1::EnteredSet(int setId) { // return false; } void AIScriptMutant1::OtherAgentEnteredThisSet(int otherActorId) { // return false; } void AIScriptMutant1::OtherAgentExitedThisSet(int otherActorId) { // return false; } void AIScriptMutant1::OtherAgentEnteredCombatMode(int otherActorId, int combatMode) { if (Actor_Query_Which_Set_In(kActorMutant1) == Player_Query_Current_Set() && Actor_Query_Goal_Number(kActorMutant1) != 599 ) { if (otherActorId == kActorMcCoy) { if (combatMode) { Actor_Modify_Combat_Aggressiveness(kActorMutant1, 10); } 
else { Actor_Modify_Combat_Aggressiveness(kActorMutant1, -10); } } else if (otherActorId == kActorFreeSlotA || otherActorId == kActorMutant2 || otherActorId == kActorMutant3 ) { Actor_Modify_Combat_Aggressiveness(kActorMutant1, 10); } else { Actor_Modify_Combat_Aggressiveness(kActorMutant1, -10); } } } void AIScriptMutant1::ShotAtAndMissed() { if (Actor_Query_Goal_Number(kActorMutant1) != 410) { Actor_Modify_Combat_Aggressiveness(kActorMutant1, 10); Actor_Modify_Friendliness_To_Other(kActorMutant1, kActorMcCoy, -10); } } bool AIScriptMutant1::ShotAtAndHit() { if (Actor_Query_Goal_Number(kActorMutant1) != 410) { Actor_Modify_Combat_Aggressiveness(kActorMutant1, 15); Actor_Modify_Friendliness_To_Other(kActorMutant1, kActorMcCoy, -15); } return false; } void AIScriptMutant1::Retired(int byActorId) { Actor_Set_Goal_Number(kActorMutant1, 599); } int AIScriptMutant1::GetFriendlinessModifierIfGetsClue(int otherActorId, int clueId) { return 0; } bool AIScriptMutant1::GoalChanged(int currentGoalNumber, int newGoalNumber) { switch (newGoalNumber) { case 400: AI_Movement_Track_Flush(kActorMutant1); AI_Movement_Track_Append(kActorMutant1, 39, 0); AI_Movement_Track_Repeat(kActorMutant1); if (Game_Flag_Query(kFlagCT04HomelessKilledByMcCoy)) { Actor_Set_Combat_Aggressiveness(kActorMutant1, 70); Actor_Set_Friendliness_To_Other(kActorMutant1, kActorMcCoy, 20); } return true; case 401: Actor_Set_Targetable(kActorMutant1, true); AI_Movement_Track_Flush(kActorMutant1); AI_Movement_Track_Append(kActorMutant1, 39, 0); switch (Random_Query(1, 8)) { case 1: AI_Movement_Track_Append(kActorMutant1, 182, 0); AI_Movement_Track_Append(kActorMutant1, 183, 2); AI_Movement_Track_Append(kActorMutant1, 184, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 2: AI_Movement_Track_Append(kActorMutant1, 296, 0); AI_Movement_Track_Append(kActorMutant1, 297, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 3: AI_Movement_Track_Append(kActorMutant1, 176, 0); 
AI_Movement_Track_Append(kActorMutant1, 177, 0); AI_Movement_Track_Append(kActorMutant1, 178, 2); AI_Movement_Track_Append(kActorMutant1, 177, 0); AI_Movement_Track_Append(kActorMutant1, 176, 1); AI_Movement_Track_Append(kActorMutant1, 39, 45); AI_Movement_Track_Repeat(kActorMutant1); break; case 4: AI_Movement_Track_Append(kActorMutant1, 298, 0); AI_Movement_Track_Append(kActorMutant1, 300, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 5: AI_Movement_Track_Append(kActorMutant1, 301, 0); AI_Movement_Track_Append(kActorMutant1, 302, 2); AI_Movement_Track_Append(kActorMutant1, 303, 0); AI_Movement_Track_Append(kActorMutant1, 304, 0); AI_Movement_Track_Append(kActorMutant1, 305, 0); AI_Movement_Track_Append(kActorMutant1, 304, 0); AI_Movement_Track_Append(kActorMutant1, 306, 0); AI_Movement_Track_Append(kActorMutant1, 39, 60); AI_Movement_Track_Repeat(kActorMutant1); break; case 6: AI_Movement_Track_Append(kActorMutant1, 307, 0); AI_Movement_Track_Append(kActorMutant1, 308, 0); AI_Movement_Track_Append(kActorMutant1, 309, 1); AI_Movement_Track_Append(kActorMutant1, 310, 3); AI_Movement_Track_Append(kActorMutant1, 311, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 7: switch (Random_Query(1, 5)) { case 1: AI_Movement_Track_Append(kActorMutant1, 532, 0); AI_Movement_Track_Append(kActorMutant1, 533, 0); AI_Movement_Track_Append(kActorMutant1, 534, 2); AI_Movement_Track_Append(kActorMutant1, 535, 3); AI_Movement_Track_Append(kActorMutant1, 533, 0); AI_Movement_Track_Append(kActorMutant1, 532, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 2: AI_Movement_Track_Append(kActorMutant1, 532, 0); AI_Movement_Track_Append(kActorMutant1, 533, 0); AI_Movement_Track_Append(kActorMutant1, 535, 2); AI_Movement_Track_Append(kActorMutant1, 534, 3); AI_Movement_Track_Append(kActorMutant1, 533, 0); AI_Movement_Track_Append(kActorMutant1, 532, 0); AI_Movement_Track_Repeat(kActorMutant1); break; case 3: AI_Movement_Track_Append(kActorMutant1, 536, 0); 
// --- tail of AIScriptMutant1::GoalChanged (the function header and the opening
// --- cases are above this chunk; only comments are added here, code unchanged).
// Inner switch: install one of several looping patrol tracks.
// NOTE(review): the numeric waypoint ids (53x) and the second argument
// (presumably a pause in seconds at the waypoint) come from game data —
// their meaning is not verifiable from this file alone.
AI_Movement_Track_Append(kActorMutant1, 537, 0);
AI_Movement_Track_Append(kActorMutant1, 538, 2);
AI_Movement_Track_Append(kActorMutant1, 537, 0);
AI_Movement_Track_Append(kActorMutant1, 536, 0);
AI_Movement_Track_Repeat(kActorMutant1);
break;

case 4:
	// Out-and-back loop 532 -> 533 -> 534 (with pause) -> 533 -> 532, repeated.
	AI_Movement_Track_Append(kActorMutant1, 532, 0);
	AI_Movement_Track_Append(kActorMutant1, 533, 0);
	AI_Movement_Track_Append(kActorMutant1, 534, 3);
	AI_Movement_Track_Append(kActorMutant1, 533, 0);
	AI_Movement_Track_Append(kActorMutant1, 532, 0);
	AI_Movement_Track_Repeat(kActorMutant1);
	break;

case 5:
	// Same loop shape as case 4 but with waypoint 535 as the turnaround point.
	AI_Movement_Track_Append(kActorMutant1, 532, 0);
	AI_Movement_Track_Append(kActorMutant1, 533, 0);
	AI_Movement_Track_Append(kActorMutant1, 535, 1);
	AI_Movement_Track_Append(kActorMutant1, 533, 0);
	AI_Movement_Track_Append(kActorMutant1, 532, 0);
	AI_Movement_Track_Repeat(kActorMutant1);
	break;

default:
	return true;
}
break;

case 8:
	// UG07: only wander there while the set is flagged empty; otherwise
	// bounce straight back to goal 403 (de-target + return to goal 401 below).
	if (Game_Flag_Query(kFlagUG07Empty)) {
		AI_Movement_Track_Append(kActorMutant1, 418, 0);
		AI_Movement_Track_Append(kActorMutant1, 417, 0);
		AI_Movement_Track_Append(kActorMutant1, 539, 0);
		AI_Movement_Track_Repeat(kActorMutant1);
	} else {
		Actor_Set_Goal_Number(kActorMutant1, 403);
	}
	break;

default:
	// Fallback: park at waypoint 39 (used below as the "off-stage" spot too)
	// with a long pause, looping.
	AI_Movement_Track_Append(kActorMutant1, 39, 60);
	AI_Movement_Track_Repeat(kActorMutant1);
	break;
}
return true;

case 403:
	// Stop being a combat target and re-enter the patrol goal.
	Actor_Set_Targetable(kActorMutant1, false);
	Actor_Set_Goal_Number(kActorMutant1, 401);
	return true;

case 404:
	// Retreat: drop the current track and go sit at waypoint 39.
	AI_Movement_Track_Flush(kActorMutant1);
	AI_Movement_Track_Append(kActorMutant1, 39, 0);
	AI_Movement_Track_Repeat(kActorMutant1);
	return true;

case 410:
	// Enter combat against McCoy; only the 5th argument (presumably an
	// engagement/attack parameter — not verifiable here) varies per set.
	switch (Actor_Query_Which_Set_In(kActorMutant1)) {
	case kSetUG01:
		Non_Player_Actor_Combat_Mode_On(kActorMutant1, kActorCombatStateIdle, false, kActorMcCoy, 11, kAnimationModeCombatIdle, kAnimationModeCombatWalk, kAnimationModeCombatRun, -1, -1, -1, 10, 300, false);
		break;

	case kSetUG04:
		// fall through
	case kSetUG05:
		// fall through
	case kSetUG06:
		Non_Player_Actor_Combat_Mode_On(kActorMutant1, kActorCombatStateIdle, false, kActorMcCoy, 10, kAnimationModeCombatIdle, kAnimationModeCombatWalk, kAnimationModeCombatRun, -1, -1, -1, 10, 300, false);
		break;

	case kSetUG07:
		Non_Player_Actor_Combat_Mode_On(kActorMutant1, kActorCombatStateIdle, false, kActorMcCoy, 12, kAnimationModeCombatIdle, kAnimationModeCombatWalk, kAnimationModeCombatRun, -1, -1, -1, 10, 300, false);
		break;

	case kSetUG10:
		// fall through
	case kSetUG12:
		// fall through
	case kSetUG14:
		Non_Player_Actor_Combat_Mode_On(kActorMutant1, kActorCombatStateIdle, false, kActorMcCoy, 14, kAnimationModeCombatIdle, kAnimationModeCombatWalk, kAnimationModeCombatRun, -1, -1, -1, 10, 300, false);
		break;
	}
	return true;

case 411:
	// (Re)initialize combat stats. Health scales with difficulty; the mutant
	// is more aggressive / less friendly if McCoy killed the CT04 homeless man.
	AI_Movement_Track_Flush(kActorMutant1);
	Actor_Set_Intelligence(kActorMutant1, 40);
	Actor_Set_Health(kActorMutant1, 10 * Query_Difficulty_Level() + 30, 10 * Query_Difficulty_Level() + 30);

	if (Game_Flag_Query(kFlagCT04HomelessKilledByMcCoy)) {
		Actor_Set_Combat_Aggressiveness(kActorMutant1, 70);
		Actor_Set_Friendliness_To_Other(kActorMutant1, kActorMcCoy, 20);
	} else {
		Actor_Set_Combat_Aggressiveness(kActorMutant1, 40);
		Actor_Set_Friendliness_To_Other(kActorMutant1, kActorMcCoy, 45);
	}

	// code repeated also in case 599 which precedes this one
	// redundant? (NOTE(review): if both goals fire, the other two mutants get
	// the friendliness/aggressiveness adjustment twice)
	// results in additional reduction in friendliness and increase of aggressiveness for the other two mutants
	Actor_Modify_Friendliness_To_Other(kActorMutant2, kActorMcCoy, -10);
	Actor_Modify_Friendliness_To_Other(kActorMutant3, kActorMcCoy, -20);
	Actor_Modify_Combat_Aggressiveness(kActorMutant2, 10);
	Actor_Modify_Combat_Aggressiveness(kActorMutant3, 15);
	Actor_Set_Goal_Number(kActorMutant1, 403);
	return true;

case 590:
	// Idle far off-stage at waypoint 39 with a long pause.
	AI_Movement_Track_Flush(kActorMutant1);
	AI_Movement_Track_Append(kActorMutant1, 39, 100);
	AI_Movement_Track_Repeat(kActorMutant1);
	return true;

case 599:
	// Death goal: play the death animation and make the surviving mutants
	// hostile toward McCoy (same adjustment as in case 411 above).
	AI_Movement_Track_Flush(kActorMutant1);
	Actor_Change_Animation_Mode(kActorMutant1, kAnimationModeDie);
	// results in additional reduction in friendliness and increase of aggressiveness for the other two mutants
	Actor_Modify_Friendliness_To_Other(kActorMutant2, kActorMcCoy, -10);
	Actor_Modify_Friendliness_To_Other(kActorMutant3, kActorMcCoy, -20);
	Actor_Modify_Combat_Aggressiveness(kActorMutant2, 10);
	Actor_Modify_Combat_Aggressiveness(kActorMutant3, 15);
	return true;

default:
	break;
}
return false;
}

// Advances the current animation frame-set one tick and reports which model
// animation and frame to render. States: 0 idle, 1/2 walking, 3-5 talking,
// 6 melee attack, 7/10 jump/hit reactions, 8 dying, 9 dead (held on last frame).
bool AIScriptMutant1::UpdateAnimation(int *animation, int *frame) {
	switch (_animationState) {
	case 0:
		*animation = kModelAnimationMutant1Idle;
		++_animationFrame;
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1Idle)) {
			_animationFrame = 0;
		}
		break;

	case 1:
		// fall through
	case 2:
		*animation = kModelAnimationMutant1Walking;
		++_animationFrame;
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1Walking)) {
			_animationFrame = 0;
		}
		break;

	case 3:
		// Talking; at a frame-set boundary, drop back to idle if a return to
		// idle was requested via ChangeAnimationMode (mode 0/4).
		if (_animationFrame == 0 && _resumeIdleAfterFramesetCompletesFlag) {
			*animation = kModelAnimationMutant1Idle;
			_animationState = 0;
		} else {
			*animation = kModelAnimationMutant1MoreCalmTalk;
			++_animationFrame;
			if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1MoreCalmTalk)) {
				_animationFrame = 0;
			}
		}
		break;

	case 4:
		// One-shot talk variation; falls back to the base talk state (3).
		*animation = kModelAnimationMutant1MoreCalmTalk;
		++_animationFrame;
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1MoreCalmTalk)) {
			_animationFrame = 0;
			_animationState = 3;
			*animation = kModelAnimationMutant1MoreCalmTalk;
		}
		break;

	case 5:
		// One-shot yell/hurt talk variation; also falls back to state 3.
		*animation = kModelAnimationMutant1YellOrHurt;
		++_animationFrame;
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1YellOrHurt)) {
			_animationFrame = 0;
			_animationState = 3;
			*animation = kModelAnimationMutant1MoreCalmTalk;
		}
		break;

	case 6:
		// Melee attack: grunt on frame 5, apply the hit attempt on frame 9.
		*animation = kModelAnimationMutant1MeleeAttack;
		++_animationFrame;
		if (_animationFrame == 5) {
			int snd;
			if (Random_Query(1, 2) == 1) {
				snd = 9010;
			} else {
				snd = 9015;
			}
			Sound_Play_Speech_Line(kActorMutant1, snd, 75, 0, 99);
		}
		if (_animationFrame == 9) {
			Actor_Combat_AI_Hit_Attempt(kActorMutant1);
		}
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1MeleeAttack)) {
			Actor_Change_Animation_Mode(kActorMutant1, kAnimationModeIdle);
		}
		break;

	case 7:
		// Hit reaction (jump frame-set), with a hurt sound on the first frame.
		*animation = kModelAnimationMutant1Jump;
		++_animationFrame;
		if (_animationFrame == 1) {
			Ambient_Sounds_Play_Sound(kSfxHURT1M1, 99, 0, 0, 25);
		}
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1Jump)) {
			Actor_Change_Animation_Mode(kActorMutant1, kAnimationModeIdle);
		}
		break;

	case 8:
		// Dying: death yell on the first frame, then switch to mode 88
		// (handled below as the "stay dead" state 9).
		*animation = kModelAnimationMutant1ShotDead;
		++_animationFrame;
		if (_animationFrame == 1) {
			Sound_Play(kSfxYELL1M1, 100, 0, 0, 50);
		}
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1ShotDead)) {
			Actor_Change_Animation_Mode(kActorMutant1, 88);
		}
		break;

	case 9:
		// Dead: hold the final frame of the shot-dead frame-set forever.
		*animation = kModelAnimationMutant1ShotDead;
		_animationFrame = Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1ShotDead) - 1;
		break;

	case 10:
		// Alternate hit reaction; hurt sound later in the frame-set (frame 9).
		*animation = kModelAnimationMutant1Jump;
		++_animationFrame;
		if (_animationFrame == 9) {
			Sound_Play(kSfxHURT1M1, 100, 0, 0, 50);
		}
		if (_animationFrame >= Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1Jump)) {
			Actor_Change_Animation_Mode(kActorMutant1, kAnimationModeIdle);
		}
		break;

	default:
		break;
	}
	*frame = _animationFrame;

	return true;
}

// Maps a requested animation mode to an internal _animationState (see
// UpdateAnimation above). Talk states (3-5) are not interrupted by an idle
// request; they finish their frame-set first via the resume flag.
// NOTE(review): cases 12 and 13 are identical to case 3, and case 8 duplicates
// case 2 — kept as-is to preserve the original mode table.
bool AIScriptMutant1::ChangeAnimationMode(int mode) {
	switch (mode) {
	case 0:
		if (_animationState >= 3 && _animationState <= 5) {
			_resumeIdleAfterFramesetCompletesFlag = true;
		} else {
			_animationState = 0;
			_animationFrame = 0;
		}
		break;

	case 1:
		// fall through
	case 7:
		_animationState = 1;
		_animationFrame = 0;
		break;

	case 2:
		_animationState = 2;
		_animationFrame = 0;
		break;

	case 3:
		_animationState = 3;
		_animationFrame = 0;
		_resumeIdleAfterFramesetCompletesFlag = false;
		break;

	case 4:
		if (_animationState >= 3 && _animationState <= 5) {
			_resumeIdleAfterFramesetCompletesFlag = true;
		} else {
			_animationState = 0;
			_animationFrame = 0;
		}
		break;

	case 6:
		_animationState = 6;
		_animationFrame = 0;
		break;

	case 8:
		_animationState = 2;
		_animationFrame = 0;
		break;

	case 12:
		_animationState = 3;
		_animationFrame = 0;
		_resumeIdleAfterFramesetCompletesFlag = false;
		break;

	case 13:
		_animationState = 3;
		_animationFrame = 0;
		_resumeIdleAfterFramesetCompletesFlag = false;
		break;

	case 21:
		// fall through
	case 22:
		_animationState = 10;
		_animationFrame = 0;
		break;

	case kAnimationModeDie:
		_animationState = 8;
		_animationFrame = 0;
		break;

	case 88:
		// "Stay dead": jump straight to the held last frame of the death set.
		_animationState = 9;
		_animationFrame = Slice_Animation_Query_Number_Of_Frames(kModelAnimationMutant1ShotDead) - 1;
		break;
	}
	return true;
}

// Copies the current animation state out (used by the engine for save/restore).
void AIScriptMutant1::QueryAnimationState(int *animationState, int *animationFrame, int *animationStateNext, int *animationNext) {
	*animationState = _animationState;
	*animationFrame = _animationFrame;
	*animationStateNext = _animationStateNext;
	*animationNext = _animationNext;
}

// Restores a previously queried animation state.
void AIScriptMutant1::SetAnimationState(int animationState, int animationFrame, int animationStateNext, int animationNext) {
	_animationState = animationState;
	_animationFrame = animationFrame;
	_animationStateNext = animationStateNext;
	_animationNext = animationNext;
}

// No special handling when a movement-track waypoint is reached.
bool AIScriptMutant1::ReachedMovementTrackWaypoint(int waypointId) {
	return true;
}

// Fleeing combat drops back to goal 403 (de-target, resume patrol).
void AIScriptMutant1::FledCombat() {
	Actor_Set_Goal_Number(kActorMutant1, 403);
}

} // End of namespace BladeRunner
gpl-2.0
EuroPlusFinance/Software
Quantum Trading Platforms/QuantLib-1.4/ql/methods/lattices/trinomialtree.cpp
4
2581
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2001, 2002, 2003 Sadruddin Rejeb
 Copyright (C) 2005 StatPro Italia srl

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license.  You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <http://quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the license for more details.
*/

#include <ql/methods/lattices/trinomialtree.hpp>
#include <ql/stochasticprocess.hpp>

namespace QuantLib {

    // Builds a recombining trinomial tree for a 1-D stochastic process on the
    // given time grid. At each step the node spacing is set from the local
    // variance, and each node branches to the three nodes (at the next time)
    // centered on the one closest to the conditional mean; the three branch
    // probabilities are chosen to match the process's mean and variance.
    // If isPositive is true, branchings are shifted up so that every reachable
    // node value stays strictly positive (used for processes that must not
    // reach zero).
    TrinomialTree::TrinomialTree(
                        const boost::shared_ptr<StochasticProcess1D>& process,
                        const TimeGrid& timeGrid,
                        bool isPositive)
    : Tree<TrinomialTree>(timeGrid.size()), dx_(1, 0.0), timeGrid_(timeGrid) {
        x0_ = process->x0();
        Size nTimeSteps = timeGrid.size() - 1;
        Integer jMin = 0;   // lowest node index reachable at the current step
        Integer jMax = 0;   // highest node index reachable at the current step

        for (Size i=0; i<nTimeSteps; i++) {
            Time t = timeGrid[i];
            Time dt = timeGrid.dt(i);

            //Variance must be independent of x
            Real v2 = process->variance(t, 0.0, dt);
            Volatility v = std::sqrt(v2);
            // Node spacing for the NEXT time level: v*sqrt(3) is the standard
            // choice for trinomial trees (keeps probabilities well-behaved).
            dx_.push_back(v*std::sqrt(3.0));

            Branching branching;
            for (Integer j=jMin; j<=jMax; j++) {
                Real x = x0_ + j*dx_[i];
                Real m = process->expectation(t, x, dt);
                // Index (at the next level) of the node nearest to the
                // conditional mean m; branch to temp-1, temp, temp+1.
                Integer temp = Integer(std::floor((m-x0_)/dx_[i+1] + 0.5));

                if (isPositive) {
                    // Shift the branching up until even the down-branch node
                    // (temp-1) has a strictly positive value.
                    while (x0_+(temp-1)*dx_[i+1]<=0) {
                        temp++;
                    }
                }

                // e is the displacement of the mean from the central branch
                // node; the probabilities below match the first two moments.
                Real e = m - (x0_ + temp*dx_[i+1]);
                Real e2 = e*e;
                Real e3 = e*std::sqrt(3.0);

                Real p1 = (1.0 + e2/v2 - e3/v)/6.0;  // down-branch probability
                Real p2 = (2.0 - e2/v2)/3.0;         // middle-branch probability
                Real p3 = (1.0 + e2/v2 + e3/v)/6.0;  // up-branch probability

                branching.add(temp, p1, p2, p3);
            }
            branchings_.push_back(branching);

            // Reachable index range for the next time level.
            jMin = branching.jMin();
            jMax = branching.jMax();
        }
    }

}
gpl-2.0
sjp38/linux.doc_trans_membarrier
mm/memory.c
4
112190
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, then end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

/* Kernel command-line "norandmaps": disable all VA-space randomization. */
static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

EXPORT_SYMBOL(zero_pfn);

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

/*
 * Fold the current task's cached per-thread RSS deltas into @mm's
 * counters and reset the cache.
 */
void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

/*
 * Fast-path RSS update: accumulate into the current task's cache when
 * @mm is the current task's mm, otherwise fall back to the shared counter.
 */
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

#ifdef HAVE_GENERIC_MMU_GATHER

/*
 * Advance tlb->active to the next gather batch, allocating a new one if
 * needed (bounded by MAX_GATHER_BATCH_COUNT). Returns false when no
 * further batch is available and the caller must flush.
 */
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

/* tlb_gather_mmu
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 *	users and we're going to destroy the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
	tlb->page_size = 0;

	__tlb_reset_range(tlb);
}

/*
 * Flush the TLB for the gathered range (if any) and notify secondary MMUs,
 * then reset the tracked range. Does not free the gathered pages.
 */
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
	__tlb_reset_range(tlb);
}

/* Free all pages gathered so far and rewind to the local (on-stack) batch. */
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

/* Full flush: TLB invalidation first, then free the gathered pages. */
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mmu_gather_batch *batch, *next;

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	/* Release any dynamically allocated overflow batches. */
	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

/* __tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs. Returns the number of free page slots left.
 *	When out of page slots we must call tlb_flush_mmu().
 *	returns true if the caller should flush.
 */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

	/* A gather only batches pages of a single size at a time. */
	if (!tlb->page_size)
		tlb->page_size = page_size;
	else {
		if (page_size != tlb->page_size)
			return true;
	}

	batch = tlb->active;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	batch->pages[batch->nr++] = page;

	return false;
}

#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

/* RCU callback: free every page table gathered in the batch, then the batch. */
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

/* Hand the current table batch to RCU for deferred freeing. */
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

/*
 * Queue a page table page for RCU-deferred freeing; falls back to a
 * synchronous IPI-based free when no batch page can be allocated.
 */
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	/*
	 * When there's less then two users of this mm there cannot be a
	 * concurrent page-table walk.
	 */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

/*
 * Free the PTE tables under this PUD entry for [addr, end), then free the
 * PMD table itself if the [floor, ceiling) hole fully covers it.
 */
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	/* Only free the PMD table if the hole spans its whole PUD slot. */
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

/*
 * Free the PMD tables under this PGD entry for [addr, end), then free the
 * PUD table itself if the [floor, ceiling) hole fully covers it.
 * (The tail of this function continues past this edit.)
 */
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	/* Only free the PUD table if the hole spans its whole PGD slot. */
	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
pgd_clear(pgd); pud_free_tlb(tlb, pud, start); } /* * This function frees user-level page tables of a process. */ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. * * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that. 
*/ addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; free_pud_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling) { while (vma) { struct vm_area_struct *next = vma->vm_next; unsigned long addr = vma->vm_start; /* * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ unlink_anon_vmas(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { hugetlb_free_pgd_range(tlb, addr, vma->vm_end, floor, next? next->vm_start: ceiling); } else { /* * Optimization: gather nearby vmas into one call down */ while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; unlink_anon_vmas(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, floor, next? next->vm_start: ceiling); } vma = next; } } int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { spinlock_t *ptl; pgtable_t new = pte_alloc_one(mm, address); if (!new) return -ENOMEM; /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_read_barrier_depends() barriers in page table walking code. 
*/ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ ptl = pmd_lock(mm, pmd); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ atomic_long_inc(&mm->nr_ptes); pmd_populate(mm, pmd, new); new = NULL; } spin_unlock(ptl); if (new) pte_free(mm, new); return 0; } int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) { pte_t *new = pte_alloc_one_kernel(&init_mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&init_mm.page_table_lock); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; } spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); return 0; } static inline void init_rss_vec(int *rss) { memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); } static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) { int i; if (current->mm == mm) sync_mm_rss(mm); for (i = 0; i < NR_MM_COUNTERS; i++) if (rss[i]) add_mm_counter(mm, i, rss[i]); } /* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error. */ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { pr_alert("BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? 
vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page, "bad pte"); pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, vma->vm_file ? vma->vm_file->f_op->mmap : NULL, mapping ? mapping->a_ops->readpage : NULL); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } /* * vm_normal_page -- This function gets the "struct page" associated with a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this * case, NULL is returned here. "Normal" mappings do have a struct page. * * There are 2 broad cases. Firstly, an architecture may define a pte_special() * pte bit, in which case this function is trivial. Secondly, an architecture * may not have a spare pte bit, which requires a more complicated scheme, * described below. * * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a * special mapping (even if there are underlying and valid "struct pages"). * COWed pages of a VM_PFNMAP are always normal. * * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. 
To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). * * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The disadvantage is that pages are refcounted * (which can be slower and simply not an option for some PFNMAP users). The * advantage is that we don't have to follow the strict linearity rule of * PFNMAP mappings in order to support COWable mappings. * */ #ifdef __HAVE_ARCH_PTE_SPECIAL # define HAVE_PTE_SPECIAL 1 #else # define HAVE_PTE_SPECIAL 0 #endif struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long pfn = pte_pfn(pte); if (HAVE_PTE_SPECIAL) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (!is_zero_pfn(pfn)) print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !HAVE_PTE_SPECIAL case follows: */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. 
*/ out: return pfn_to_page(pfn); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) { unsigned long pfn = pmd_pfn(pmd); /* * There is no pmd_special() but there may be special pmds, e.g. * in a direct-access (dax) mapping, so let's just replicate the * !HAVE_PTE_SPECIAL case from vm_normal_page() here. */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; if (unlikely(pfn > highest_memmap_pfn)) return NULL; /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: return pfn_to_page(pfn); } #endif /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma. */ static inline unsigned long copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) { unsigned long vm_flags = vma->vm_flags; pte_t pte = *src_pte; struct page *page; /* pte contains position in swap or file, so copy. */ if (unlikely(!pte_present(pte))) { swp_entry_t entry = pte_to_swp_entry(pte); if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) return entry.val; /* make sure dst_mm is on swapoff's mmlist. 
*/ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist)) list_add(&dst_mm->mmlist, &src_mm->mmlist); spin_unlock(&mmlist_lock); } rss[MM_SWAPENTS]++; } else if (is_migration_entry(entry)) { page = migration_entry_to_page(entry); rss[mm_counter(page)]++; if (is_write_migration_entry(entry) && is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both * parent and child to be set to read. */ make_migration_entry_read(&entry); pte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(*src_pte)) pte = pte_swp_mksoft_dirty(pte); set_pte_at(src_mm, addr, src_pte, pte); } } goto out_set_pte; } /* * If it's a COW mapping, write protect it both * in the parent and the child */ if (is_cow_mapping(vm_flags)) { ptep_set_wrprotect(src_mm, addr, src_pte); pte = pte_wrprotect(pte); } /* * If it's a shared mapping, mark it clean in * the child */ if (vm_flags & VM_SHARED) pte = pte_mkclean(pte); pte = pte_mkold(pte); page = vm_normal_page(vma, addr, pte); if (page) { get_page(page); page_dup_rmap(page, false); rss[mm_counter(page)]++; } out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); return 0; } static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; spinlock_t *src_ptl, *dst_ptl; int progress = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; again: init_rss_vec(rss); dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) return -ENOMEM; src_pte = pte_offset_map(src_pmd, addr); src_ptl = pte_lockptr(src_mm, src_pmd); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; arch_enter_lazy_mmu_mode(); do { /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. 
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		/*
		 * copy_one_pte() returns a nonzero swap entry value when it
		 * needs a swap-count continuation allocated; that allocation
		 * must happen below, after both pte locks are dropped.
		 */
		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
							vma, addr, rss);
		if (entry.val)
			break;
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		/*
		 * Allocate the swap-count continuation with GFP_KERNEL (may
		 * sleep) now that no pte locks are held, then retry the
		 * remainder of the range from "again".
		 */
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;
	return 0;
}

/*
 * Copy one pmd level of @vma's range from @src_mm into @dst_mm.
 * Transparent-huge / devmap pmds are copied wholesale by copy_huge_pmd();
 * everything else descends into copy_pte_range().  Returns 0 or -ENOMEM.
 */
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
			err = copy_huge_pmd(dst_mm, src_mm,
					    dst_pmd, src_pmd, addr, vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through: the huge pmd was split, copy as ptes */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Copy one pud level of @vma's range from @src_mm into @dst_mm,
 * descending into copy_pmd_range().  Returns 0 or -ENOMEM.
 */
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

/*
 * Copy the page tables covering @vma from @src_mm into @dst_mm (the fork
 * path).  VMAs whose ptes can simply be refaulted are skipped entirely;
 * hugetlb VMAs take their own copy path.  Returns 0 or a negative errno.
 */
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	bool is_cow;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
			!vma->anon_vma)
		return 0;

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
*/ is_cow = is_cow_mapping(vma->vm_flags); mmun_start = addr; mmun_end = end; if (is_cow) mmu_notifier_invalidate_range_start(src_mm, mmun_start, mmun_end); ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, vma, addr, next))) { ret = -ENOMEM; break; } } while (dst_pgd++, src_pgd++, addr = next, addr != end); if (is_cow) mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); return ret; } static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) { struct mm_struct *mm = tlb->mm; int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; swp_entry_t entry; struct page *pending_page = NULL; again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = start_pte; arch_enter_lazy_mmu_mode(); do { pte_t ptent = *pte; if (pte_none(ptent)) { continue; } if (pte_present(ptent)) { struct page *page; page = vm_normal_page(vma, addr, ptent); if (unlikely(details) && page) { /* * unmap_shared_mapping_pages() wants to * invalidate cache without truncating: * unmap shared but keep private pages. 
*/ if (details->check_mapping && details->check_mapping != page_rmapping(page)) continue; } ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); if (unlikely(!page)) continue; if (!PageAnon(page)) { if (pte_dirty(ptent)) { /* * oom_reaper cannot tear down dirty * pages */ if (unlikely(details && details->ignore_dirty)) continue; force_flush = 1; set_page_dirty(page); } if (pte_young(ptent) && likely(!(vma->vm_flags & VM_SEQ_READ))) mark_page_accessed(page); } rss[mm_counter(page)]--; page_remove_rmap(page, false); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); if (unlikely(__tlb_remove_page(tlb, page))) { force_flush = 1; pending_page = page; addr += PAGE_SIZE; break; } continue; } /* only check swap_entries if explicitly asked for in details */ if (unlikely(details && !details->check_swap_entries)) continue; entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) rss[MM_SWAPENTS]--; else if (is_migration_entry(entry)) { struct page *page; page = migration_entry_to_page(entry); rss[mm_counter(page)]--; } if (unlikely(!free_swap_and_cache(entry))) print_bad_pte(vma, addr, ptent, NULL); pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } while (pte++, addr += PAGE_SIZE, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); /* Do the actual TLB flush before dropping ptl */ if (force_flush) tlb_flush_mmu_tlbonly(tlb); pte_unmap_unlock(start_pte, ptl); /* * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched * memory too. Restart if we didn't do everything. 
*/ if (force_flush) { force_flush = 0; tlb_flush_mmu_free(tlb); if (pending_page) { /* remove the page with new size */ __tlb_remove_pte_page(tlb, pending_page); pending_page = NULL; } if (addr != end) goto again; } return addr; } static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) { VM_BUG_ON_VMA(vma_is_anonymous(vma) && !rwsem_is_locked(&tlb->mm->mmap_sem), vma); split_huge_pmd(vma, pmd, addr); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) goto next; /* fall through */ } /* * Here there can be other concurrent MADV_DONTNEED or * trans huge page faults running, and if the pmd is * none or trans huge it can change under us. This is * because MADV_DONTNEED holds the mmap_sem in read * mode. 
		 */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto next;
		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}

/*
 * Tear down one pud level of the range, descending into zap_pmd_range().
 * Returns the address where teardown stopped (== @end when complete).
 */
static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
	} while (pud++, addr = next, addr != end);

	return addr;
}

/*
 * Unmap [addr, end) of @vma, walking from the pgd down.  @details
 * optionally restricts which ptes are torn down (see struct zap_details).
 */
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}

/*
 * Unmap the intersection of [start_addr, end_addr) with @vma, dispatching
 * hugetlb VMAs to the hugepage teardown path and everything else to
 * unmap_page_range().  A VMA entirely outside the range is a no-op.
 */
static void unmap_single_vma(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr,
		struct zap_details *details)
{
	/* Clamp the requested range to this VMA's extent. */
	unsigned long start = max(vma->vm_start, start_addr);
	unsigned long end;

	if (start >= vma->vm_end)
		return;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return;

	if (vma->vm_file)
		uprobe_munmap(vma, start, end);

	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn(vma, 0, 0);

	if (start != end) {
		if (unlikely(is_vm_hugetlb_page(vma))) {
			/*
			 * It is undesirable to test vma->vm_file as it
			 * should be non-null for valid hugetlb area.
			 * However, vm_file will be NULL in the error
			 * cleanup path of mmap_region. When
			 * hugetlbfs ->mmap method fails,
			 * mmap_region() nullifies vma->vm_file
			 * before calling this function to clean up.
			 * Since no pte has actually been setup, it is
			 * safe to do nothing in this case.
			 */
			if (vma->vm_file) {
				i_mmap_lock_write(vma->vm_file->f_mapping);
				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
				i_mmap_unlock_write(vma->vm_file->f_mapping);
			}
		} else
			unmap_page_range(tlb, vma, start, end, details);
	}
}

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	/* Walk the sorted vma list until it runs past the requested range. */
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @start: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * Caller must protect the VMA list
 */
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = start + size;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	mmu_notifier_invalidate_range_start(mm, start, end);
	/* The range may span multiple vmas; zap each in turn. */
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
		unmap_single_vma(&tlb, vma, start, end, details);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);
}

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * The range must fit into one VMA.
 */
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long end = address + size;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, address, end);
	update_hiwater_rss(mm);
	mmu_notifier_invalidate_range_start(mm, address, end);
	unmap_single_vma(&tlb, vma, address, end, details);
	mmu_notifier_invalidate_range_end(mm, address, end);
	tlb_finish_mmu(&tlb, address, end);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	/* Reject ranges that overflow the vma or non-PFNMAP vmas. */
	if (address < vma->vm_start || address + size > vma->vm_end ||
	    		!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range_single(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

/*
 * Walk (allocating intermediate levels as needed) down to the pte covering
 * @addr and return it mapped, with its page-table lock held in *@ptl.
 * Returns NULL if a page-table allocation fails.
 */
pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd) {
			VM_BUG_ON(pmd_trans_huge(*pmd));
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
		}
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages.
Only * old drivers should use this, and they needed to mark their * pages reserved for the old functions anyway. */ static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) { struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte; spinlock_t *ptl; retval = -EINVAL; if (PageAnon(page)) goto out; retval = -ENOMEM; flush_dcache_page(page); pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; if (!pte_none(*pte)) goto out_unlock; /* Ok, finally just insert the thing.. */ get_page(page); inc_mm_counter_fast(mm, mm_counter_file(page)); page_add_file_rmap(page, false); set_pte_at(mm, addr, pte, mk_pte(page, prot)); retval = 0; pte_unmap_unlock(pte, ptl); return retval; out_unlock: pte_unmap_unlock(pte, ptl); out: return retval; } /** * vm_insert_page - insert single page into user vma * @vma: user vma to map to * @addr: target user address of this page * @page: source kernel page * * This allows drivers to insert individual pages they've allocated * into a user vma. * * The page has to be a nice clean _individual_ kernel allocation. * If you allocate a compound page, you need to have marked it as * such (__GFP_COMP), or manually just split the page up yourself * (see split_page()). * * NOTE! Traditionally this was done with "remap_pfn_range()" which * took an arbitrary page protection parameter. This doesn't allow * that. Your vma protection will have to be set up correctly, which * means that if you want a shared writable mapping, you'd better * ask for a shared writable mapping! * * The page does not need to be reserved. * * Usually this function is called from f_op->mmap() handler * under mm->mmap_sem write-lock, so it can change vma->vm_flags. * Caller must set VM_MIXEDMAP on vma if it wants to call this * function from other places, for example from page-fault handler. 
*/ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (!page_count(page)) return -EINVAL; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); BUG_ON(vma->vm_flags & VM_PFNMAP); vma->vm_flags |= VM_MIXEDMAP; } return insert_page(vma, addr, page, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_page); static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t prot) { struct mm_struct *mm = vma->vm_mm; int retval; pte_t *pte, entry; spinlock_t *ptl; retval = -ENOMEM; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; retval = -EBUSY; if (!pte_none(*pte)) goto out_unlock; /* Ok, finally just insert the thing.. */ if (pfn_t_devmap(pfn)) entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); else entry = pte_mkspecial(pfn_t_pte(pfn, prot)); set_pte_at(mm, addr, pte, entry); update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ retval = 0; out_unlock: pte_unmap_unlock(pte, ptl); out: return retval; } /** * vm_insert_pfn - insert single pfn into user vma * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * * Similar to vm_insert_page, this allows drivers to insert individual pages * they've allocated into a user vma. Same comments apply. * * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return NULL. * * vma cannot be a COW mapping. * * As this is called only for pages that do not currently exist, we * do not need to flush old virtual caches or the TLB. 
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	/* Thin wrapper: insert with the vma's default protection. */
	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pfn);

/**
 * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 * @pgprot: pgprot flags for the inserted page
 *
 * This is exactly like vm_insert_pfn, except that it allows drivers to
 * to override pgprot on a per-page basis.
 *
 * This only makes sense for IO mappings, and it makes no sense for
 * cow mappings. In general, using multiple vmas is preferable;
 * vm_insert_pfn_prot should only be used if using multiple VMAs is
 * impractical.
 */
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot)
{
	int ret;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range). However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
		return -EINVAL;

	ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);

	return ret;
}
EXPORT_SYMBOL(vm_insert_pfn_prot);

/*
 * Insert a pfn into a VM_MIXEDMAP vma: pages with a valid struct page are
 * refcounted via insert_page() when pte_special is unavailable, everything
 * else goes in as a raw (special/devmap) pfn via insert_pfn().
 */
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would there be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
		struct page *page;

		/*
		 * At this point we are committed to insert_page()
		 * regardless of whether the caller specified flags that
		 * result in pfn_t_has_page() == false.
		 */
		page = pfn_to_page(pfn_t_to_pfn(pfn));
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed.
any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
/*
 * Fill one pte level of [addr, end) with consecutive special pfn mappings
 * starting at @pfn.  All ptes in the range must be none on entry (BUG_ON).
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

/*
 * Allocate and fill one pmd level.  @pfn is rebased by (addr >> PAGE_SHIFT)
 * so each inner call can recompute the pfn for its sub-range.
 */
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

/*
 * Allocate and fill one pud level; same pfn-rebasing trick as
 * remap_pmd_range().
 */
static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
*/ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { pgd_t *pgd; unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); struct mm_struct *mm = vma->vm_mm; unsigned long remap_pfn = pfn; int err; /* * Physically remapped pages are special. Tell the * rest of the world about it: * VM_IO tells people not to look at these pages * (accesses can have side effects). * VM_PFNMAP tells the core MM that the base pages are just * raw PFN mappings, and do not have a "struct page" associated * with them. * VM_DONTEXPAND * Disable vma merging and expanding with mremap(). * VM_DONTDUMP * Omit vma from core dump, even when VM_IO turned off. * * There's a horrible special case to handle copy-on-write * behaviour that some programs depend on. We mark the "original" * un-COW'ed pages by matching them up with "vma->vm_pgoff". * See vm_normal_page() for details. */ if (is_cow_mapping(vma->vm_flags)) { if (addr != vma->vm_start || end != vma->vm_end) return -EINVAL; vma->vm_pgoff = pfn; } err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size)); if (err) return -EINVAL; vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); err = remap_pud_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) break; } while (pgd++, addr = next, addr != end); if (err) untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size)); return err; } EXPORT_SYMBOL(remap_pfn_range); /** * vm_iomap_memory - remap memory to userspace * @vma: user vma to map to * @start: start of area * @len: size of area * * This is a simplified io_remap_pfn_range() for common driver use. The * driver just needs to give us the physical memory range to be mapped, * we'll figure out the rest from the vma information. * * NOTE! 
Some drivers might want to tweak vma->vm_page_prot first to get * whatever write-combining details or similar. */ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) { unsigned long vm_len, pfn, pages; /* Check that the physical memory area passed in looks valid */ if (start + len < start) return -EINVAL; /* * You *really* shouldn't map things that aren't page-aligned, * but we've historically allowed it because IO memory might * just have smaller alignment. */ len += start & ~PAGE_MASK; pfn = start >> PAGE_SHIFT; pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; if (pfn + pages < pfn) return -EINVAL; /* We start the mapping 'vm_pgoff' pages into the area */ if (vma->vm_pgoff > pages) return -EINVAL; pfn += vma->vm_pgoff; pages -= vma->vm_pgoff; /* Can we fit all of the mapping? */ vm_len = vma->vm_end - vma->vm_start; if (vm_len >> PAGE_SHIFT > pages) return -EINVAL; /* Ok, let it rip */ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); } EXPORT_SYMBOL(vm_iomap_memory); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) { pte_t *pte; int err; pgtable_t token; spinlock_t *uninitialized_var(ptl); pte = (mm == &init_mm) ? 
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	arch_enter_lazy_mmu_mode();

	token = pmd_pgtable(*pmd);

	/* Invoke @fn on every pte in [addr, end); stop at the first error. */
	do {
		err = fn(pte++, token, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();

	/* init_mm ptes are not lock-mapped, so only user mms unlock here. */
	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}

/*
 * Allocate one pmd level as needed and apply @fn to each pte beneath it.
 */
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

/*
 * Allocate one pud level as needed and apply @fn to each pte beneath it.
 */
static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}

/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	int err;

	if (WARN_ON(addr >= end))
		return -EINVAL;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry which was
 * read non-atomically. Before making any commitment, on those architectures
 * or configurations (e.g.
i386 with PAE) which might give a mix of unmatched
 * parts, do_swap_page must check under lock before unmapping the pte and
 * proceeding (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	/*
	 * Only re-check under the pte lock when a pte cannot be read
	 * atomically (pte wider than a word, e.g. PAE); on UP non-preempt
	 * the non-atomic read could not have been torn.
	 */
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}

/*
 * Copy the contents of @src into @dst for COW.  When @src is NULL (a raw
 * PFN mapping with no struct page) fall back to copying from the user
 * virtual address, zero-filling on failure.
 */
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	debug_dma_assert_idle(src);

	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			clear_page(kaddr);
		kunmap_atomic(kaddr);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * Derive the gfp mask for fault-path allocations: file-backed vmas use
 * their mapping's mask plus __GFP_FS|__GFP_IO, anonymous/special vmas
 * get plain GFP_KERNEL.
 */
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
{
	struct file *vm_file = vma->vm_file;

	if (vm_file)
		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;

	/*
	 * Special mappings (e.g. VDSO) do not have any file so fake
	 * a default GFP_KERNEL for them.
	 */
	return GFP_KERNEL;
}

/*
 * Notify the address space that the page is about to become writable so that
 * it can prohibit this or wait for the page to get into an appropriate state.
 *
 * We do this without the lock held, so that it can sleep if it needs to.
*/ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, unsigned long address) { struct vm_fault vmf; int ret; vmf.virtual_address = (void __user *)(address & PAGE_MASK); vmf.pgoff = page->index; vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; vmf.gfp_mask = __get_fault_gfp_mask(vma); vmf.page = page; vmf.cow_page = NULL; ret = vma->vm_ops->page_mkwrite(vma, &vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) return ret; if (unlikely(!(ret & VM_FAULT_LOCKED))) { lock_page(page); if (!page->mapping) { unlock_page(page); return 0; /* retry */ } ret |= VM_FAULT_LOCKED; } else VM_BUG_ON_PAGE(!PageLocked(page), page); return ret; } /* * Handle write page faults for pages that can be reused in the current vma * * This can happen either due to the mapping being with the VM_SHARED flag, * or due to us being the last reference standing to the page. In either * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte, struct page *page, int page_mkwrite, int dirty_shared) __releases(fe->ptl) { struct vm_area_struct *vma = fe->vma; pte_t entry; /* * Clear the pages cpupid information as the existing * information potentially belongs to a now completely * unrelated process. 
*/ if (page) page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1)) update_mmu_cache(vma, fe->address, fe->pte); pte_unmap_unlock(fe->pte, fe->ptl); if (dirty_shared) { struct address_space *mapping; int dirtied; if (!page_mkwrite) lock_page(page); dirtied = set_page_dirty(page); VM_BUG_ON_PAGE(PageAnon(page), page); mapping = page->mapping; unlock_page(page); put_page(page); if ((dirtied || page_mkwrite) && mapping) { /* * Some device drivers do not set page.mapping * but still dirty their pages */ balance_dirty_pages_ratelimited(mapping); } if (!page_mkwrite) file_update_time(vma->vm_file); } return VM_FAULT_WRITE; } /* * Handle the case of a page which we actually need to copy to a new page. * * Called with mmap_sem locked and the old page referenced, but * without the ptl held. * * High level logic flow: * * - Allocate a page, copy the content of the old page to the new one. * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. * - Take the PTL. If the pte changed, bail out and release the allocated page * - If the pte is still the way we remember it, update the page table and all * relevant references. This includes dropping the reference the page-table * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. 
*/
static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
		struct page *old_page)
{
	struct vm_area_struct *vma = fe->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct page *new_page = NULL;
	pte_t entry;
	int page_copied = 0;
	const unsigned long mmun_start = fe->address & PAGE_MASK;
	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
	struct mem_cgroup *memcg;

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(orig_pte))) {
		/* Faulting on the zero page: no data to copy, just zero-fill. */
		new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
				fe->address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, fe->address, vma);
	}

	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
		goto oom_free_new;

	__SetPageUptodate(new_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl);
	if (likely(pte_same(*fe->pte, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				/* A file page becomes anonymous: move the RSS
				 * accounting from the file to the anon counter. */
				dec_mm_counter_fast(mm,
						mm_counter_file(old_page));
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		}
		flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush_notify(vma, fe->address, fe->pte);
		page_add_new_anon_rmap(new_page, vma, fe->address, false);
		mem_cgroup_commit_charge(new_page, memcg, false, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		set_pte_at_notify(mm, fe->address, fe->pte, entry);
		update_mmu_cache(vma, fe->address, fe->pte);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptp_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page, false);
		}

		/* Free the old page.. */
		new_page = old_page;
		page_copied = 1;
	} else {
		mem_cgroup_cancel_charge(new_page, memcg, false);
	}

	if (new_page)
		put_page(new_page);

	pte_unmap_unlock(fe->pte, fe->ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (old_page) {
		/*
		 * Don't let another task, with possibly unlocked vma,
		 * keep the mlocked page.
		 */
		if (page_copied && (vma->vm_flags & VM_LOCKED)) {
			lock_page(old_page);	/* LRU manipulation */
			if (PageMlocked(old_page))
				munlock_vma_page(old_page);
			unlock_page(old_page);
		}
		put_page(old_page);
	}
	return page_copied ? VM_FAULT_WRITE : 0;
oom_free_new:
	put_page(new_page);
oom:
	if (old_page)
		put_page(old_page);
	return VM_FAULT_OOM;
}

/*
 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
 * mapping
 */
static int wp_pfn_shared(struct fault_env *fe,  pte_t orig_pte)
{
	struct vm_area_struct *vma = fe->vma;

	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
		struct vm_fault vmf = {
			.page = NULL,
			.pgoff = linear_page_index(vma, fe->address),
			.virtual_address =
				(void __user *)(fe->address & PAGE_MASK),
			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
		};
		int ret;

		pte_unmap_unlock(fe->pte, fe->ptl);
		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
		if (ret & VM_FAULT_ERROR)
			return ret;
		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
				&fe->ptl);
		/*
		 * We might have raced with another page fault while we
		 * released the pte_offset_map_lock.
		 */
		if (!pte_same(*fe->pte, orig_pte)) {
			pte_unmap_unlock(fe->pte, fe->ptl);
			return 0;
		}
	}
	return wp_page_reuse(fe, orig_pte, NULL, 0, 0);
}

/*
 * Write fault on a page in a shared writable mapping: notify the filesystem
 * via ->page_mkwrite (if present), revalidate the pte after the lock was
 * dropped, then reuse the existing page rather than COWing it.
 */
static int wp_page_shared(struct fault_env *fe, pte_t orig_pte,
		struct page *old_page)
	__releases(fe->ptl)
{
	struct vm_area_struct *vma = fe->vma;
	int page_mkwrite = 0;

	get_page(old_page);

	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
		int tmp;

		pte_unmap_unlock(fe->pte, fe->ptl);
		tmp = do_page_mkwrite(vma, old_page, fe->address);
		if (unlikely(!tmp || (tmp &
				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
			put_page(old_page);
			return tmp;
		}
		/*
		 * Since we dropped the lock we need to revalidate
		 * the PTE as someone else may have changed it.  If
		 * they did, we just return, as we can count on the
		 * MMU to tell us if they didn't also make it writable.
		 */
		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
						&fe->ptl);
		if (!pte_same(*fe->pte, orig_pte)) {
			unlock_page(old_page);
			pte_unmap_unlock(fe->pte, fe->ptl);
			put_page(old_page);
			return 0;
		}
		page_mkwrite = 1;
	}

	return wp_page_reuse(fe, orig_pte, old_page, page_mkwrite, 1);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct fault_env *fe, pte_t orig_pte)
	__releases(fe->ptl)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *old_page;

	old_page = vm_normal_page(vma, fe->address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
		 * VM_PFNMAP VMA.
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			return wp_pfn_shared(fe, orig_pte);

		pte_unmap_unlock(fe->pte, fe->ptl);
		return wp_page_copy(fe, orig_pte, old_page);
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page) && !PageKsm(old_page)) {
		int total_mapcount;
		if (!trylock_page(old_page)) {
			/* Couldn't get the page lock without sleeping: take a
			 * reference, drop the pte lock, sleep for the page lock,
			 * then revalidate the pte. */
			get_page(old_page);
			pte_unmap_unlock(fe->pte, fe->ptl);
			lock_page(old_page);
			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
					fe->address, &fe->ptl);
			if (!pte_same(*fe->pte, orig_pte)) {
				unlock_page(old_page);
				pte_unmap_unlock(fe->pte, fe->ptl);
				put_page(old_page);
				return 0;
			}
			put_page(old_page);
		}
		if (reuse_swap_page(old_page, &total_mapcount)) {
			if (total_mapcount == 1) {
				/*
				 * The page is all ours. Move it to
				 * our anon_vma so the rmap code will
				 * not search our parent or siblings.
				 * Protected against the rmap code by
				 * the page lock.
				 */
				page_move_anon_rmap(old_page, vma);
			}
			unlock_page(old_page);
			return wp_page_reuse(fe, orig_pte, old_page, 0, 0);
		}
		unlock_page(old_page);
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		return wp_page_shared(fe, orig_pte, old_page);
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	get_page(old_page);

	pte_unmap_unlock(fe->pte, fe->ptl);
	return wp_page_copy(fe, orig_pte, old_page);
}

/* Zap one contiguous range that lies entirely within a single VMA. */
static void unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
}

/*
 * Walk every VMA in the interval tree that overlaps the requested pgoff
 * range, clamp the range to each VMA, and zap the corresponding virtual
 * addresses.
 */
static inline void unmap_mapping_range_tree(struct rb_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	pgoff_t vba, vea, zba, zea;

	vma_interval_tree_foreach(vma, root,
			details->first_index, details->last_index) {

		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma) - 1;
		/* Clamp the zap range [zba, zea] to this VMA's [vba, vea]. */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details);
	}
}

/**
 * unmap_mapping_range - unmap the portion of all mmaps in the specified
 * address_space corresponding to the specified page range in the underlying
 * file.
* * @mapping: the address space containing mmaps to be unmapped. * @holebegin: byte in first page to unmap, relative to the start of * the underlying file. This will be rounded down to a PAGE_SIZE * boundary. Note that this is different from truncate_pagecache(), which * must keep the partial page. In contrast, we must get rid of * partial pages. * @holelen: size of prospective hole in bytes. This will be rounded * up to a PAGE_SIZE boundary. A holelen of zero truncates to the * end of the file. * @even_cows: 1 when truncating a file, unmap even private COWed pages; * but 0 when invalidating pagecache, don't throw away private data. */ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { struct zap_details details = { }; pgoff_t hba = holebegin >> PAGE_SHIFT; pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Check for overflow. */ if (sizeof(holelen) > sizeof(hlen)) { long long holeend = (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; if (holeend & ~(long long)ULONG_MAX) hlen = ULONG_MAX - hba + 1; } details.check_mapping = even_cows? NULL: mapping; details.first_index = hba; details.last_index = hba + hlen - 1; if (details.last_index < details.first_index) details.last_index = ULONG_MAX; i_mmap_lock_write(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); i_mmap_unlock_write(mapping); } EXPORT_SYMBOL(unmap_mapping_range); /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with pte unmapped and unlocked. * * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). 
 */
int do_swap_page(struct fault_env *fe, pte_t orig_pte)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *page, *swapcache;
	struct mem_cgroup *memcg;
	swp_entry_t entry;
	pte_t pte;
	int locked;
	int exclusive = 0;
	int ret = 0;

	if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte))
		goto out;

	entry = pte_to_swp_entry(orig_pte);
	if (unlikely(non_swap_entry(entry))) {
		/* Not a real swap entry: migration, hwpoison, or corruption. */
		if (is_migration_entry(entry)) {
			migration_entry_wait(vma->vm_mm, fe->pmd, fe->address);
		} else if (is_hwpoison_entry(entry)) {
			ret = VM_FAULT_HWPOISON;
		} else {
			print_bad_pte(vma, fe->address, orig_pte, NULL);
			ret = VM_FAULT_SIGBUS;
		}
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, fe->address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
					fe->address, &fe->ptl);
			if (likely(pte_same(*fe->pte, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	} else if (PageHWPoison(page)) {
		/*
		 * hwpoisoned dirty swapcache pages are kept for killing
		 * owner processes (which may be unknown at hwpoison time)
		 */
		ret = VM_FAULT_HWPOISON;
		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
		swapcache = page;
		goto out_release;
	}

	swapcache = page;
	locked = lock_page_or_retry(page, vma->vm_mm, fe->flags);

	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
	if (!locked) {
		ret |= VM_FAULT_RETRY;
		goto out_release;
	}

	/*
	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
	 * release the swapcache from under us.  The page pin, and pte_same
	 * test below, are not enough to exclude that.  Even if it is still
	 * swapcache, we need to check that the page's swap has not changed.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
		goto out_page;

	page = ksm_might_need_to_copy(page, vma, fe->address);
	if (unlikely(!page)) {
		ret = VM_FAULT_OOM;
		page = swapcache;
		goto out_page;
	}

	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
				&memcg, false)) {
		ret = VM_FAULT_OOM;
		goto out_page;
	}

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
			&fe->ptl);
	if (unlikely(!pte_same(*fe->pte, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/*
	 * The page isn't present yet, go ahead with the fault.
	 *
	 * Be careful about the sequence of operations here.
	 * To get its accounting right, reuse_swap_page() must be called
	 * while the page is counted on swap but not yet in mapcount i.e.
	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
	 * must be called after the swap_free(), or it will never succeed.
	 */

	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
	pte = mk_pte(page, vma->vm_page_prot);
	if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
		/* Write fault on an exclusively-owned page: map it writable
		 * now and skip the COW path below. */
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		fe->flags &= ~FAULT_FLAG_WRITE;
		ret |= VM_FAULT_WRITE;
		exclusive = RMAP_EXCLUSIVE;
	}
	flush_icache_page(vma, page);
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);
	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
	if (page == swapcache) {
		do_page_add_anon_rmap(page, vma, fe->address, exclusive);
		mem_cgroup_commit_charge(page, memcg, true, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, fe->address, false);
		mem_cgroup_commit_charge(page, memcg, false, false);
		lru_cache_add_active_or_unevictable(page, vma);
	}

	swap_free(entry);
	if (mem_cgroup_swap_full(page) ||
	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
		try_to_free_swap(page);
	unlock_page(page);
	if (page != swapcache) {
		/*
		 * Hold the lock to avoid the swap entry to be reused
		 * until we take the PT lock for the pte_same() check
		 * (to avoid false positives from pte_same). For
		 * further safety release the lock after the swap_free
		 * so that the swap count won't change under a
		 * parallel locked swapcache.
		 */
		unlock_page(swapcache);
		put_page(swapcache);
	}

	if (fe->flags & FAULT_FLAG_WRITE) {
		ret |= do_wp_page(fe, pte);
		if (ret & VM_FAULT_ERROR)
			/* keep only the error bits if do_wp_page() failed */
			ret &= VM_FAULT_ERROR;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, fe->address, fe->pte);
unlock:
	pte_unmap_unlock(fe->pte, fe->ptl);
out:
	return ret;
out_nomap:
	mem_cgroup_cancel_charge(page, memcg, false);
	pte_unmap_unlock(fe->pte, fe->ptl);
out_page:
	unlock_page(page);
out_release:
	put_page(page);
	if (page != swapcache) {
		unlock_page(swapcache);
		put_page(swapcache);
	}
	return ret;
}

/*
 * This is like a special single-page "expand_{down|up}wards()",
 * except we must first make sure that 'address{-|+}PAGE_SIZE'
 * doesn't hit another vma.
 */
static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		struct vm_area_struct *prev = vma->vm_prev;

		/*
		 * Is there a mapping abutting this one below?
		 *
		 * That's only ok if it's the same stack mapping
		 * that has gotten split..
		 */
		if (prev && prev->vm_end == address)
			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;

		return expand_downwards(vma, address - PAGE_SIZE);
	}
	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
		struct vm_area_struct *next = vma->vm_next;

		/* As VM_GROWSDOWN but s/below/above/ */
		if (next && next->vm_start == address + PAGE_SIZE)
			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;

		return expand_upwards(vma, address + PAGE_SIZE);
	}
	return 0;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct fault_env *fe)
{
	struct vm_area_struct *vma = fe->vma;
	struct mem_cgroup *memcg;
	struct page *page;
	pte_t entry;

	/* File mapping without ->vm_ops ? */
	if (vma->vm_flags & VM_SHARED)
		return VM_FAULT_SIGBUS;

	/* Check if we need to add a guard page to the stack */
	if (check_stack_guard_page(vma, fe->address) < 0)
		return VM_FAULT_SIGSEGV;

	/*
	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have down_read(mmap_sem).
	 */
	if (pte_alloc(vma->vm_mm, fe->pmd, fe->address))
		return VM_FAULT_OOM;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(fe->pmd)))
		return 0;

	/* Use the zero-page for reads */
	if (!(fe->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm)) {
		entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address),
						vma->vm_page_prot));
		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
				&fe->ptl);
		if (!pte_none(*fe->pte))
			goto unlock;
		/* Deliver the page fault to userland, check inside PT lock */
		if (userfaultfd_missing(vma)) {
			pte_unmap_unlock(fe->pte, fe->ptl);
			return handle_userfault(fe, VM_UFFD_MISSING);
		}
		goto setpte;
	}

	/* Allocate our own private page. */
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, fe->address);
	if (!page)
		goto oom;

	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
		goto oom_free_page;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	entry = mk_pte(page, vma->vm_page_prot);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));

	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
			&fe->ptl);
	if (!pte_none(*fe->pte))
		goto release;

	/* Deliver the page fault to userland, check inside PT lock */
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(fe->pte, fe->ptl);
		mem_cgroup_cancel_charge(page, memcg, false);
		put_page(page);
		return handle_userfault(fe, VM_UFFD_MISSING);
	}

	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, fe->address, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, vma);
setpte:
	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, fe->address, fe->pte);
unlock:
	pte_unmap_unlock(fe->pte, fe->ptl);
	return 0;
release:
	mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	goto unlock;
oom_free_page:
	put_page(page);
oom:
	return VM_FAULT_OOM;
}

/*
 * The mmap_sem must have been held on entry, and may have been
 * released depending on flags and vma->vm_ops->fault() return value.
 * See filemap_fault() and __lock_page_retry().
 */
static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
		struct page *cow_page, struct page **page, void **entry)
{
	struct vm_area_struct *vma = fe->vma;
	struct vm_fault vmf;
	int ret;

	vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = fe->flags;
	vmf.page = NULL;
	vmf.gfp_mask = __get_fault_gfp_mask(vma);
	vmf.cow_page = cow_page;

	ret = vma->vm_ops->fault(vma, &vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;
	if (ret & VM_FAULT_DAX_LOCKED) {
		/* DAX case: no struct page, hand back the locked entry. */
		*entry = vmf.entry;
		return ret;
	}

	if (unlikely(PageHWPoison(vmf.page))) {
		if (ret & VM_FAULT_LOCKED)
			unlock_page(vmf.page);
		put_page(vmf.page);
		return VM_FAULT_HWPOISON;
	}

	/* Normalise: the page is always returned locked to the caller. */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);

	*page = vmf.page;
	return ret;
}

/*
 * Make sure a page table is mapped and locked at fe->pte, populating the
 * pmd from fe->prealloc_pte or allocating a fresh one if needed.
 */
static int pte_alloc_one_map(struct fault_env *fe)
{
	struct vm_area_struct *vma = fe->vma;

	if (!pmd_none(*fe->pmd))
		goto map_pte;
	if (fe->prealloc_pte) {
		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
		if (unlikely(!pmd_none(*fe->pmd))) {
			/* Somebody else populated the pmd meanwhile. */
			spin_unlock(fe->ptl);
			goto map_pte;
		}

		atomic_long_inc(&vma->vm_mm->nr_ptes);
		pmd_populate(vma->vm_mm, fe->pmd, fe->prealloc_pte);
		spin_unlock(fe->ptl);
		fe->prealloc_pte = 0;
	} else if (unlikely(pte_alloc(vma->vm_mm, fe->pmd, fe->address))) {
		return VM_FAULT_OOM;
	}
map_pte:
	/*
	 * If a huge pmd materialized under us just retry later.  Use
	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
	 * in a different thread of this mm, in turn leading to a misleading
	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
	 * regular pmd that we can walk with pte_offset_map() and we can do that
	 * through an atomic read in C, which is what pmd_trans_unstable()
	 * provides.
	 */
	if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
		return VM_FAULT_NOPAGE;

	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
			&fe->ptl);
	return 0;
}

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
/*
 * Can the range around @haddr in @vma be mapped with a huge pmd?  Requires
 * the file offset to be huge-page aligned relative to the VMA start, and
 * the whole huge page to fit inside the VMA.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
		return false;
	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

/*
 * Try to map a compound page-cache page with a huge pmd.  Returns 0 when
 * the fault is handled, VM_FAULT_FALLBACK when the caller must fall back
 * to mapping individual ptes.
 */
static int do_set_pmd(struct fault_env *fe, struct page *page)
{
	struct vm_area_struct *vma = fe->vma;
	bool write = fe->flags & FAULT_FLAG_WRITE;
	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
	pmd_t entry;
	int i, ret;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;

	ret = VM_FAULT_FALLBACK;
	page = compound_head(page);

	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
	if (unlikely(!pmd_none(*fe->pmd)))
		goto out;

	for (i = 0; i < HPAGE_PMD_NR; i++)
		flush_icache_page(vma, page + i);

	entry = mk_huge_pmd(page, vma->vm_page_prot);
	if (write)
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
	page_add_file_rmap(page, true);

	set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);

	update_mmu_cache_pmd(vma, haddr, fe->pmd);

	/* fault is handled */
	ret = 0;
	count_vm_event(THP_FILE_MAPPED);
out:
	spin_unlock(fe->ptl);
	return ret;
}
#else
/* Stub: never reached when THP pagecache is compiled out. */
static int do_set_pmd(struct fault_env *fe, struct page *page)
{
	BUILD_BUG();
	return 0;
}
#endif

/**
 * alloc_set_pte - setup new PTE entry for given page and add reverse page
 * mapping. If needed, the function allocates page table or use pre-allocated.
 *
 * @fe: fault environment
 * @memcg: memcg to charge page (only for private mappings)
 * @page: page to map
 *
 * Caller must take care of unlocking fe->ptl, if fe->pte is non-NULL on return.
 *
 * Target users are page handler itself and implementations of
 * vm_ops->map_pages.
*/ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, struct page *page) { struct vm_area_struct *vma = fe->vma; bool write = fe->flags & FAULT_FLAG_WRITE; pte_t entry; int ret; if (pmd_none(*fe->pmd) && PageTransCompound(page) && IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { /* THP on COW? */ VM_BUG_ON_PAGE(memcg, page); ret = do_set_pmd(fe, page); if (ret != VM_FAULT_FALLBACK) return ret; } if (!fe->pte) { ret = pte_alloc_one_map(fe); if (ret) return ret; } /* Re-check under ptl */ if (unlikely(!pte_none(*fe->pte))) return VM_FAULT_NOPAGE; flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, fe->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); } set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache(vma, fe->address, fe->pte); return 0; } static unsigned long fault_around_bytes __read_mostly = rounddown_pow_of_two(65536); #ifdef CONFIG_DEBUG_FS static int fault_around_bytes_get(void *data, u64 *val) { *val = fault_around_bytes; return 0; } /* * fault_around_pages() and fault_around_mask() expects fault_around_bytes * rounded down to nearest page order. It's what do_fault_around() expects to * see. 
*/ static int fault_around_bytes_set(void *data, u64 val) { if (val / PAGE_SIZE > PTRS_PER_PTE) return -EINVAL; if (val > PAGE_SIZE) fault_around_bytes = rounddown_pow_of_two(val); else fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ return 0; } DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops, fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); static int __init fault_around_debugfs(void) { void *ret; ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL, &fault_around_bytes_fops); if (!ret) pr_warn("Failed to create fault_around_bytes in debugfs"); return 0; } late_initcall(fault_around_debugfs); #endif /* * do_fault_around() tries to map few pages around the fault address. The hope * is that the pages will be needed soon and this will lower the number of * faults to handle. * * It uses vm_ops->map_pages() to map the pages, which skips the page if it's * not ready to be mapped: not up-to-date, locked, etc. * * This function is called with the page table lock taken. In the split ptlock * case the page table lock only protects only those entries which belong to * the page table corresponding to the fault address. * * This function doesn't cross the VMA boundaries, in order to call map_pages() * only once. * * fault_around_pages() defines how many pages we'll try to map. * do_fault_around() expects it to return a power of two less than or equal to * PTRS_PER_PTE. * * The virtual address of the area that we map is naturally aligned to the * fault_around_pages() value (and therefore to page order). This way it's * easier to guarantee that we don't cross page table boundaries. 
 */
static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
{
	unsigned long address = fe->address, nr_pages, mask;
	pgoff_t end_pgoff;
	int off, ret = 0;

	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;

	/* Temporarily rewind fe->address to the start of the around-window. */
	fe->address = max(address & mask, fe->vma->vm_start);
	off = ((address - fe->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	start_pgoff -= off;

	/*
	 *  end_pgoff is either end of page table or end of vma
	 *  or fault_around_pages() from start_pgoff, depending what is nearest.
	 */
	end_pgoff = start_pgoff -
		((fe->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
		PTRS_PER_PTE - 1;
	end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1,
			start_pgoff + nr_pages - 1);

	if (pmd_none(*fe->pmd)) {
		fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address);
		smp_wmb(); /* See comment in __pte_alloc() */
	}

	fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff);

	/* preallocated pagetable is unused: free it */
	if (fe->prealloc_pte) {
		pte_free(fe->vma->vm_mm, fe->prealloc_pte);
		fe->prealloc_pte = 0;
	}
	/* Huge page is mapped? Page fault is solved */
	if (pmd_trans_huge(*fe->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/* ->map_pages() haven't done anything useful. Cold page cache? */
	if (!fe->pte)
		goto out;

	/* check if the page fault is solved */
	fe->pte -= (fe->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
	if (!pte_none(*fe->pte))
		ret = VM_FAULT_NOPAGE;
	pte_unmap_unlock(fe->pte, fe->ptl);
out:
	/* Restore the original faulting address before returning. */
	fe->address = address;
	fe->pte = NULL;
	return ret;
}

/*
 * Read fault on a file-backed VMA: try the cheap fault-around path first,
 * then fall back to a single-page ->fault().
 */
static int do_read_fault(struct fault_env *fe, pgoff_t pgoff)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *fault_page;
	int ret = 0;

	/*
	 * Let's call ->map_pages() first and use ->fault() as fallback
	 * if page by the offset is not ready to be mapped (cold cache or
	 * something).
	 */
	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
		ret = do_fault_around(fe, pgoff);
		if (ret)
			return ret;
	}

	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	ret |= alloc_set_pte(fe, NULL, fault_page);
	if (fe->pte)
		pte_unmap_unlock(fe->pte, fe->ptl);
	unlock_page(fault_page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		put_page(fault_page);
	return ret;
}

/*
 * Write fault on a private file mapping: read the file page (or let DAX
 * fill the COW page directly), copy it into a fresh anonymous page, and
 * map the copy.
 */
static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *fault_page, *new_page;
	void *fault_entry;
	struct mem_cgroup *memcg;
	int ret;

	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address);
	if (!new_page)
		return VM_FAULT_OOM;

	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
				&memcg, false)) {
		put_page(new_page);
		return VM_FAULT_OOM;
	}

	ret = __do_fault(fe, pgoff, new_page, &fault_page, &fault_entry);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		goto uncharge_out;

	if (!(ret & VM_FAULT_DAX_LOCKED))
		copy_user_highpage(new_page, fault_page, fe->address, vma);
	__SetPageUptodate(new_page);

	ret |= alloc_set_pte(fe, memcg, new_page);
	if (fe->pte)
		pte_unmap_unlock(fe->pte, fe->ptl);
	if (!(ret & VM_FAULT_DAX_LOCKED)) {
		unlock_page(fault_page);
		put_page(fault_page);
	} else {
		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
	}
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		goto uncharge_out;
	return ret;
uncharge_out:
	mem_cgroup_cancel_charge(new_page, memcg, false);
	put_page(new_page);
	return ret;
}

/*
 * Write fault on a shared file mapping: fault the page in, give the
 * filesystem a ->page_mkwrite notification, map the page writable, then
 * dirty it and kick dirty-page balancing.
 */
static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *fault_page;
	struct address_space *mapping;
	int dirtied = 0;
	int ret, tmp;

	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/*
	 * Check if the backing address space wants to know that the page is
	 * about to become writable
	 */
	if (vma->vm_ops->page_mkwrite) {
		unlock_page(fault_page);
		tmp = do_page_mkwrite(vma, fault_page, fe->address);
		if (unlikely(!tmp ||
				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
			put_page(fault_page);
			return tmp;
		}
	}

	ret |= alloc_set_pte(fe, NULL, fault_page);
	if (fe->pte)
		pte_unmap_unlock(fe->pte, fe->ptl);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
					VM_FAULT_RETRY))) {
		unlock_page(fault_page);
		put_page(fault_page);
		return ret;
	}

	if (set_page_dirty(fault_page))
		dirtied = 1;
	/*
	 * Take a local copy of the address_space - page.mapping may be zeroed
	 * by truncate after unlock_page().   The address_space itself remains
	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
	 * release semantics to prevent the compiler from undoing this copying.
	 */
	mapping = page_rmapping(fault_page);
	unlock_page(fault_page);
	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
		/*
		 * Some device drivers do not set page.mapping but still
		 * dirty their pages
		 */
		balance_dirty_pages_ratelimited(mapping);
	}

	if (!vma->vm_ops->page_mkwrite)
		file_update_time(vma->vm_file);

	return ret;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults).
 * The mmap_sem may have been released depending on flags and our
 * return value.  See filemap_fault() and __lock_page_or_retry().
 */
static int do_fault(struct fault_env *fe)
{
	struct vm_area_struct *vma = fe->vma;
	pgoff_t pgoff = linear_page_index(vma, fe->address);

	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
	if (!vma->vm_ops->fault)
		return VM_FAULT_SIGBUS;
	if (!(fe->flags & FAULT_FLAG_WRITE))
		return do_read_fault(fe, pgoff);
	if (!(vma->vm_flags & VM_SHARED))
		return do_cow_fault(fe, pgoff);
	return do_shared_fault(fe, pgoff);
}

/*
 * Account a NUMA hinting fault on @page and ask the memory policy code
 * whether the page is misplaced.  Takes a reference on @page; returns the
 * target node from mpol_misplaced() (or -1 meaning "leave it where it is").
 */
static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
				unsigned long addr, int page_nid,
				int *flags)
{
	get_page(page);

	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == numa_node_id()) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		*flags |= TNF_FAULT_LOCAL;
	}

	return mpol_misplaced(page, vma, addr);
}

/*
 * Handle a NUMA hinting fault: make the pte present again, record the
 * fault for the NUMA balancer, and migrate the page to the preferred
 * node when the policy says it is misplaced.
 */
static int do_numa_page(struct fault_env *fe, pte_t pte)
{
	struct vm_area_struct *vma = fe->vma;
	struct page *page = NULL;
	int page_nid = -1;
	int last_cpupid;
	int target_nid;
	bool migrated = false;
	bool was_writable = pte_write(pte);
	int flags = 0;

	/* A PROT_NONE fault should not end up here */
	BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));

	/*
	 * The "pte" at this point cannot be used safely without
	 * validation through pte_unmap_same(). It's of NUMA type but
	 * the pfn may be screwed if the read is non atomic.
	 *
	 * We can safely just do a "set_pte_at()", because the old
	 * page table entry is not accessible, so there would be no
	 * concurrent hardware modifications to the PTE.
	 */
	fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd);
	spin_lock(fe->ptl);
	if (unlikely(!pte_same(*fe->pte, pte))) {
		pte_unmap_unlock(fe->pte, fe->ptl);
		goto out;
	}

	/* Make it present again */
	pte = pte_modify(pte, vma->vm_page_prot);
	pte = pte_mkyoung(pte);
	if (was_writable)
		pte = pte_mkwrite(pte);
	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
	update_mmu_cache(vma, fe->address, fe->pte);

	page = vm_normal_page(vma, fe->address, pte);
	if (!page) {
		pte_unmap_unlock(fe->pte, fe->ptl);
		return 0;
	}

	/* TODO: handle PTE-mapped THP */
	if (PageCompound(page)) {
		pte_unmap_unlock(fe->pte, fe->ptl);
		return 0;
	}

	/*
	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
	 * much anyway since they can be in shared cache state. This misses
	 * the case where a mapping is writable but the process never writes
	 * to it but pte_write gets cleared during protection updates and
	 * pte_dirty has unpredictable behaviour between PTE scan updates,
	 * background writeback, dirty balancing and application behaviour.
	 */
	if (!(vma->vm_flags & VM_WRITE))
		flags |= TNF_NO_GROUP;

	/*
	 * Flag if the page is shared between multiple address spaces. This
	 * is later used when determining whether to group tasks together
	 */
	if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
		flags |= TNF_SHARED;

	last_cpupid = page_cpupid_last(page);
	page_nid = page_to_nid(page);
	target_nid = numa_migrate_prep(page, vma, fe->address, page_nid,
			&flags);
	pte_unmap_unlock(fe->pte, fe->ptl);
	if (target_nid == -1) {
		put_page(page);
		goto out;
	}

	/* Migrate to the requested node */
	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated) {
		page_nid = target_nid;
		flags |= TNF_MIGRATED;
	} else
		flags |= TNF_MIGRATE_FAIL;

out:
	if (page_nid != -1)
		task_numa_fault(last_cpupid, page_nid, 1, flags);
	return 0;
}

/*
 * First fault on an empty pmd: allocate a huge page (anonymous VMAs) or
 * ask the driver's ->pmd_fault(); VM_FAULT_FALLBACK means "use ptes".
 */
static int create_huge_pmd(struct fault_env *fe)
{
	struct vm_area_struct *vma = fe->vma;
	if (vma_is_anonymous(vma))
		return do_huge_pmd_anonymous_page(fe);
	if (vma->vm_ops->pmd_fault)
		return vma->vm_ops->pmd_fault(vma, fe->address, fe->pmd,
				fe->flags);
	return VM_FAULT_FALLBACK;
}

/*
 * Write fault on a present huge pmd: COW it (anonymous), delegate to the
 * driver's ->pmd_fault(), or split the pmd so the pte-level COW path can
 * handle it.
 */
static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
{
	if (vma_is_anonymous(fe->vma))
		return do_huge_pmd_wp_page(fe, orig_pmd);
	if (fe->vma->vm_ops->pmd_fault)
		return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address,
				fe->pmd, fe->flags);

	/* COW handled on pte level: split pmd */
	VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma);
	split_huge_pmd(fe->vma, fe->pmd, fe->address);

	return VM_FAULT_FALLBACK;
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
 * concurrent faults).
 *
 * The mmap_sem may have been released depending on flags and our return value.
 * See filemap_fault() and __lock_page_or_retry().
 */
/*
 * Pte-level fault dispatcher: classify the faulting pte (absent,
 * swapped out, NUMA-protected, or present) and route to the matching
 * handler; for a present pte, handle write-protect / access-bit
 * updates in place under the pte lock.
 */
static int handle_pte_fault(struct fault_env *fe)
{
	pte_t entry;

	if (unlikely(pmd_none(*fe->pmd))) {
		/*
		 * Leave __pte_alloc() until later: because vm_ops->fault may
		 * want to allocate huge page, and if we expose page table
		 * for an instant, it will be difficult to retract from
		 * concurrent faults and from rmap lookups.
		 */
		fe->pte = NULL;
	} else {
		/* See comment in pte_alloc_one_map() */
		if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
			return 0;
		/*
		 * A regular pmd is established and it can't morph into a huge
		 * pmd from under us anymore at this point because we hold the
		 * mmap_sem read mode and khugepaged takes it in write mode.
		 * So now it's safe to run pte_offset_map().
		 */
		fe->pte = pte_offset_map(fe->pmd, fe->address);

		entry = *fe->pte;

		/*
		 * some architectures can have larger ptes than wordsize,
		 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
		 * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
		 * atomic accesses.  The code below just needs a consistent
		 * view for the ifs and we later double check anyway with the
		 * ptl lock held. So here a barrier will do.
		 */
		barrier();
		if (pte_none(entry)) {
			pte_unmap(fe->pte);
			fe->pte = NULL;
		}
	}

	/* No pte installed yet: first touch of this address. */
	if (!fe->pte) {
		if (vma_is_anonymous(fe->vma))
			return do_anonymous_page(fe);
		else
			return do_fault(fe);
	}

	if (!pte_present(entry))
		return do_swap_page(fe, entry);

	if (pte_protnone(entry))
		return do_numa_page(fe, entry);

	fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
	spin_lock(fe->ptl);
	/* Revalidate under ptl; a concurrent fault may have won the race. */
	if (unlikely(!pte_same(*fe->pte, entry)))
		goto unlock;
	if (fe->flags & FAULT_FLAG_WRITE) {
		/* do_wp_page() releases the ptl itself. */
		if (!pte_write(entry))
			return do_wp_page(fe, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(fe->vma, fe->address, fe->pte, entry,
				fe->flags & FAULT_FLAG_WRITE)) {
		update_mmu_cache(fe->vma, fe->address, fe->pte);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (fe->flags & FAULT_FLAG_WRITE)
			flush_tlb_fix_spurious_fault(fe->vma, fe->address);
	}
unlock:
	pte_unmap_unlock(fe->pte, fe->ptl);
	return 0;
}

/*
 * By the time we get here, we already hold the mm semaphore
 *
 * The mmap_sem may have been released depending on flags and our
 * return value.  See filemap_fault() and __lock_page_or_retry().
 */
/*
 * Walk/allocate the page-table hierarchy down to the pmd for @address,
 * give transparent huge pages first shot at the fault, then fall
 * through to pte-level handling.
 */
static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags)
{
	struct fault_env fe = {
		.vma = vma,
		.address = address,
		.flags = flags,
	};
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	fe.pmd = pmd_alloc(mm, pud, address);
	if (!fe.pmd)
		return VM_FAULT_OOM;
	if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) {
		int ret = create_huge_pmd(&fe);
		if (!(ret & VM_FAULT_FALLBACK))
			return ret;
	} else {
		pmd_t orig_pmd = *fe.pmd;
		int ret;

		/* Pair with the non-atomic pmd read above; see handle_pte_fault. */
		barrier();
		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
			if (pmd_protnone(orig_pmd))
				return do_huge_pmd_numa_page(&fe, orig_pmd);

			if ((fe.flags & FAULT_FLAG_WRITE) &&
					!pmd_write(orig_pmd)) {
				ret = wp_huge_pmd(&fe, orig_pmd);
				if (!(ret & VM_FAULT_FALLBACK))
					return ret;
			} else {
				huge_pmd_set_accessed(&fe, orig_pmd);
				return 0;
			}
		}
	}

	return handle_pte_fault(&fe);
}

/*
 * By the time we get here, we already hold the mm semaphore
 *
 * The mmap_sem may have been released depending on flags and our
 * return value.  See filemap_fault() and __lock_page_or_retry().
 */
/*
 * Exported page-fault entry point: accounting, memcg OOM bracketing,
 * arch permission check, then hugetlb or normal fault handling.
 */
int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags)
{
	int ret;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);
	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);

	/* do counter updates before entering really critical section. */
	check_sync_rss_stat(current);

	/*
	 * Enable the memcg OOM handling for faults triggered in user
	 * space.  Kernel faults are handled more gracefully.
	 */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_oom_enable();

	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
					    flags & FAULT_FLAG_INSTRUCTION,
					    flags & FAULT_FLAG_REMOTE))
		return VM_FAULT_SIGSEGV;

	if (unlikely(is_vm_hugetlb_page(vma)))
		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
	else
		ret = __handle_mm_fault(vma, address, flags);

	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_oom_disable();
		/*
		 * The task may have entered a memcg OOM situation but
		 * if the allocation error was handled gracefully (no
		 * VM_FAULT_OOM), there is no need to kill anything.
		 * Just clean up the OOM state peacefully.
		 */
		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
			mem_cgroup_oom_synchronize(false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(handle_mm_fault);

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
*/ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { pmd_t *new = pmd_alloc_one(mm, address); if (!new) return -ENOMEM; smp_wmb(); /* See comment in __pte_alloc */ spin_lock(&mm->page_table_lock); #ifndef __ARCH_HAS_4LEVEL_HACK if (!pud_present(*pud)) { mm_inc_nr_pmds(mm); pud_populate(mm, pud, new); } else /* Another has populated it */ pmd_free(mm, new); #else if (!pgd_present(*pud)) { mm_inc_nr_pmds(mm); pgd_populate(mm, pud, new); } else /* Another has populated it */ pmd_free(mm, new); #endif /* __ARCH_HAS_4LEVEL_HACK */ spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_PMD_FOLDED */ static int __follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *ptep; pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) goto out; pud = pud_offset(pgd, address); if (pud_none(*pud) || unlikely(pud_bad(*pud))) goto out; pmd = pmd_offset(pud, address); VM_BUG_ON(pmd_trans_huge(*pmd)); if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) goto out; /* We cannot handle huge page PFN maps. Luckily they don't exist. */ if (pmd_huge(*pmd)) goto out; ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) goto out; if (!pte_present(*ptep)) goto unlock; *ptepp = ptep; return 0; unlock: pte_unmap_unlock(ptep, *ptlp); out: return -EINVAL; } static inline int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) { int res; /* (void) is needed to make gcc happy */ (void) __cond_lock(*ptlp, !(res = __follow_pte(mm, address, ptepp, ptlp))); return res; } /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping * @address: user virtual address * @pfn: location to store found PFN * * Only IO mappings and raw PFN mappings are allowed. * * Returns zero and the pfn at @pfn on success, -ve otherwise. 
*/ int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) { int ret = -EINVAL; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(*ptep); pte_unmap_unlock(ptep, ptl); return 0; } EXPORT_SYMBOL(follow_pfn); #ifdef CONFIG_HAVE_IOREMAP_PROT int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) { int ret = -EINVAL; pte_t *ptep, pte; spinlock_t *ptl; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = *ptep; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; *prot = pgprot_val(pte_pgprot(pte)); *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; ret = 0; unlock: pte_unmap_unlock(ptep, ptl); out: return ret; } int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; unsigned long prot = 0; void __iomem *maddr; int offset = addr & (PAGE_SIZE-1); if (follow_phys(vma, addr, write, &prot, &phys_addr)) return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); if (write) memcpy_toio(maddr + offset, buf, len); else memcpy_fromio(buf, maddr + offset, len); iounmap(maddr); return len; } EXPORT_SYMBOL_GPL(generic_access_phys); #endif /* * Access another process' address space as given in mm. If non-NULL, use the * given task for page fault accounting. 
 */
/*
 * Copy up to @len bytes between @buf and the remote @mm at @addr,
 * one page at a time via get_user_pages_remote(), falling back to the
 * VMA's ->access hook for VM_IO/VM_PFNMAP regions.  Returns the number
 * of bytes actually transferred (may be short on error).
 */
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	void *old_buf = buf;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_remote(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
			break;
#else
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
			vma = find_vma(mm, addr);
			if (!vma || vma->vm_start > addr)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
				break;
			bytes = ret;
#endif
		} else {
			/* Clamp the copy to the remainder of this page. */
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			put_page(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
	mmput(mm);

	return ret;
}

/*
 * Print the name of a VMA.
 */
/*
 * Best-effort debug helper: prints "<prefix><file>[start+len]" for the
 * mapping containing @ip.  Silently does nothing in atomic context or
 * when no file-backed VMA covers @ip.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * Do not print if we are in atomic
	 * contexts (in exception stacks, etc.):
	 */
	if (preempt_count())
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf) {
			char *p;

			p = file_path(f, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&mm->mmap_sem);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
/*
 * Debug annotation: warn if a user-space access could sleep here
 * (i.e. we might fault) while sleeping is not allowed.
 */
void __might_fault(const char *file, int line)
{
	/*
	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
	 * holding the mmap_sem, this is safe because kernel memory doesn't
	 * get paged out, therefore we'll never actually fault, and the
	 * below annotations will generate false positives.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;
	if (pagefault_disabled())
		return;
	__might_sleep(file, line, 0);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Clear a huge page larger than MAX_ORDER_NR_PAGES, walking subpages
 * via mem_map_next() since they may not be contiguous in the mem_map.
 */
static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

/*
 * Zero an entire huge page, rescheduling between subpages to keep
 * latency bounded.
 */
void clear_huge_page(struct page *page,
		     unsigned long addr, unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

/*
 * Copy a huge page larger than MAX_ORDER_NR_PAGES; see
 * clear_gigantic_page() for why mem_map_next() is needed.
 */
static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

/*
 * Copy an entire huge page from @src to @dst, rescheduling between
 * subpages to keep latency bounded.
 */
void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	int i;

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
/* Slab cache backing the out-of-line split pte locks. */
static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

/* Allocate and attach a split pte lock to @page; false on OOM. */
bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
gpl-2.0
andronmobi/vlc-parrot-asteroid
src/input/control.c
4
19375
/***************************************************************************** * control.c ***************************************************************************** * Copyright (C) 1999-2004 VLC authors and VideoLAN * $Id$ * * Authors: Gildas Bazin <gbazin@videolan.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <vlc_common.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "input_internal.h" #include "event.h" #include "resource.h" #include "es_out.h" static void UpdateBookmarksOption( input_thread_t * ); /**************************************************************************** * input_Control ****************************************************************************/ /** * Control function for inputs. * \param p_input input handle * \param i_query query type * \return VLC_SUCCESS if ok */ int input_Control( input_thread_t *p_input, int i_query, ... 
) { va_list args; int i_result; va_start( args, i_query ); i_result = input_vaControl( p_input, i_query, args ); va_end( args ); return i_result; } int input_vaControl( input_thread_t *p_input, int i_query, va_list args ) { seekpoint_t *p_bkmk, ***ppp_bkmk; int i_bkmk = 0; int *pi_bkmk; int i_int, *pi_int; bool b_bool, *pb_bool; double f, *pf; int64_t i_64, *pi_64; char *psz; vlc_value_t val; switch( i_query ) { case INPUT_GET_POSITION: pf = (double*)va_arg( args, double * ); *pf = var_GetFloat( p_input, "position" ); return VLC_SUCCESS; case INPUT_SET_POSITION: f = (double)va_arg( args, double ); return var_SetFloat( p_input, "position", f ); case INPUT_GET_LENGTH: pi_64 = (int64_t*)va_arg( args, int64_t * ); *pi_64 = var_GetTime( p_input, "length" ); return VLC_SUCCESS; case INPUT_GET_TIME: pi_64 = (int64_t*)va_arg( args, int64_t * ); *pi_64 = var_GetTime( p_input, "time" ); return VLC_SUCCESS; case INPUT_SET_TIME: i_64 = (int64_t)va_arg( args, int64_t ); return var_SetTime( p_input, "time", i_64 ); case INPUT_GET_RATE: pi_int = (int*)va_arg( args, int * ); *pi_int = INPUT_RATE_DEFAULT / var_GetFloat( p_input, "rate" ); return VLC_SUCCESS; case INPUT_SET_RATE: i_int = (int)va_arg( args, int ); return var_SetFloat( p_input, "rate", (float)INPUT_RATE_DEFAULT / (float)i_int ); case INPUT_GET_STATE: pi_int = (int*)va_arg( args, int * ); *pi_int = var_GetInteger( p_input, "state" ); return VLC_SUCCESS; case INPUT_SET_STATE: i_int = (int)va_arg( args, int ); return var_SetInteger( p_input, "state", i_int ); case INPUT_GET_AUDIO_DELAY: pi_64 = (int64_t*)va_arg( args, int64_t * ); *pi_64 = var_GetTime( p_input, "audio-delay" ); return VLC_SUCCESS; case INPUT_GET_SPU_DELAY: pi_64 = (int64_t*)va_arg( args, int64_t * ); *pi_64 = var_GetTime( p_input, "spu-delay" ); return VLC_SUCCESS; case INPUT_SET_AUDIO_DELAY: i_64 = (int64_t)va_arg( args, int64_t ); return var_SetTime( p_input, "audio-delay", i_64 ); case INPUT_SET_SPU_DELAY: i_64 = (int64_t)va_arg( args, int64_t ); 
return var_SetTime( p_input, "spu-delay", i_64 ); case INPUT_NAV_ACTIVATE: case INPUT_NAV_UP: case INPUT_NAV_DOWN: case INPUT_NAV_LEFT: case INPUT_NAV_RIGHT: input_ControlPush( p_input, i_query - INPUT_NAV_ACTIVATE + INPUT_CONTROL_NAV_ACTIVATE, NULL ); return VLC_SUCCESS; case INPUT_ADD_INFO: { char *psz_cat = (char *)va_arg( args, char * ); char *psz_name = (char *)va_arg( args, char * ); char *psz_format = (char *)va_arg( args, char * ); char *psz_value; if( vasprintf( &psz_value, psz_format, args ) == -1 ) return VLC_EGENERIC; int i_ret = input_item_AddInfo( p_input->p->p_item, psz_cat, psz_name, "%s", psz_value ); free( psz_value ); if( !p_input->b_preparsing && !i_ret ) input_SendEventMetaInfo( p_input ); return i_ret; } case INPUT_REPLACE_INFOS: case INPUT_MERGE_INFOS: { info_category_t *p_cat = va_arg( args, info_category_t * ); if( i_query == INPUT_REPLACE_INFOS ) input_item_ReplaceInfos( p_input->p->p_item, p_cat ); else input_item_MergeInfos( p_input->p->p_item, p_cat ); if( !p_input->b_preparsing ) input_SendEventMetaInfo( p_input ); return VLC_SUCCESS; } case INPUT_DEL_INFO: { char *psz_cat = (char *)va_arg( args, char * ); char *psz_name = (char *)va_arg( args, char * ); int i_ret = input_item_DelInfo( p_input->p->p_item, psz_cat, psz_name ); if( !p_input->b_preparsing && !i_ret ) input_SendEventMetaInfo( p_input ); return i_ret; } case INPUT_GET_INFO: { char *psz_cat = (char *)va_arg( args, char * ); char *psz_name = (char *)va_arg( args, char * ); char **ppsz_value = (char **)va_arg( args, char ** ); int i_ret = VLC_EGENERIC; *ppsz_value = NULL; *ppsz_value = input_item_GetInfo( p_input->p->p_item, psz_cat, psz_name ); return i_ret; } case INPUT_SET_NAME: { char *psz_name = (char *)va_arg( args, char * ); if( !psz_name ) return VLC_EGENERIC; input_item_SetName( p_input->p->p_item, psz_name ); if( !p_input->b_preparsing ) input_SendEventMetaName( p_input, psz_name ); return VLC_SUCCESS; } case INPUT_ADD_BOOKMARK: p_bkmk = (seekpoint_t *)va_arg( args, 
seekpoint_t * ); p_bkmk = vlc_seekpoint_Duplicate( p_bkmk ); vlc_mutex_lock( &p_input->p->p_item->lock ); if( !p_bkmk->psz_name ) { if( asprintf( &p_bkmk->psz_name, _("Bookmark %i"), p_input->p->i_bookmark ) == -1 ) p_bkmk->psz_name = NULL; } TAB_APPEND( p_input->p->i_bookmark, p_input->p->pp_bookmark, p_bkmk ); vlc_mutex_unlock( &p_input->p->p_item->lock ); UpdateBookmarksOption( p_input ); return VLC_SUCCESS; case INPUT_CHANGE_BOOKMARK: p_bkmk = (seekpoint_t *)va_arg( args, seekpoint_t * ); i_bkmk = (int)va_arg( args, int ); vlc_mutex_lock( &p_input->p->p_item->lock ); if( i_bkmk < p_input->p->i_bookmark ) { vlc_seekpoint_Delete( p_input->p->pp_bookmark[i_bkmk] ); p_input->p->pp_bookmark[i_bkmk] = vlc_seekpoint_Duplicate( p_bkmk ); } vlc_mutex_unlock( &p_input->p->p_item->lock ); UpdateBookmarksOption( p_input ); return VLC_SUCCESS; case INPUT_DEL_BOOKMARK: i_bkmk = (int)va_arg( args, int ); vlc_mutex_lock( &p_input->p->p_item->lock ); if( i_bkmk < p_input->p->i_bookmark ) { p_bkmk = p_input->p->pp_bookmark[i_bkmk]; TAB_REMOVE( p_input->p->i_bookmark, p_input->p->pp_bookmark, p_bkmk ); vlc_seekpoint_Delete( p_bkmk ); vlc_mutex_unlock( &p_input->p->p_item->lock ); UpdateBookmarksOption( p_input ); return VLC_SUCCESS; } vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_EGENERIC; case INPUT_GET_BOOKMARKS: ppp_bkmk = (seekpoint_t ***)va_arg( args, seekpoint_t *** ); pi_bkmk = (int *)va_arg( args, int * ); vlc_mutex_lock( &p_input->p->p_item->lock ); if( p_input->p->i_bookmark ) { int i; *pi_bkmk = p_input->p->i_bookmark; *ppp_bkmk = malloc( sizeof(seekpoint_t *) * p_input->p->i_bookmark ); for( i = 0; i < p_input->p->i_bookmark; i++ ) { (*ppp_bkmk)[i] = vlc_seekpoint_Duplicate( p_input->p->pp_bookmark[i] ); } vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; } else { *ppp_bkmk = NULL; *pi_bkmk = 0; vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_EGENERIC; } break; case INPUT_CLEAR_BOOKMARKS: vlc_mutex_lock( &p_input->p->p_item->lock 
); while( p_input->p->i_bookmark > 0 ) { p_bkmk = p_input->p->pp_bookmark[p_input->p->i_bookmark-1]; TAB_REMOVE( p_input->p->i_bookmark, p_input->p->pp_bookmark, p_bkmk ); vlc_seekpoint_Delete( p_bkmk ); } vlc_mutex_unlock( &p_input->p->p_item->lock ); UpdateBookmarksOption( p_input ); return VLC_SUCCESS; case INPUT_SET_BOOKMARK: i_bkmk = (int)va_arg( args, int ); val.i_int = i_bkmk; input_ControlPush( p_input, INPUT_CONTROL_SET_BOOKMARK, &val ); return VLC_SUCCESS; case INPUT_GET_BOOKMARK: p_bkmk = (seekpoint_t *)va_arg( args, seekpoint_t * ); vlc_mutex_lock( &p_input->p->p_item->lock ); *p_bkmk = p_input->p->bookmark; vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; case INPUT_GET_TITLE_INFO: { input_title_t **p_title = (input_title_t **)va_arg( args, input_title_t ** ); int *pi_req_title_offset = (int *) va_arg( args, int * ); vlc_mutex_lock( &p_input->p->p_item->lock ); int i_current_title = var_GetInteger( p_input, "title" ); if ( *pi_req_title_offset < 0 ) /* return current title if -1 */ *pi_req_title_offset = i_current_title; if( p_input->p->i_title && p_input->p->i_title > *pi_req_title_offset ) { *p_title = vlc_input_title_Duplicate( p_input->p->title[*pi_req_title_offset] ); vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; } else { vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_EGENERIC; } } case INPUT_GET_VIDEO_FPS: pf = (double*)va_arg( args, double * ); vlc_mutex_lock( &p_input->p->p_item->lock ); *pf = p_input->p->f_fps; vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; case INPUT_ADD_SLAVE: psz = (char*)va_arg( args, char * ); if( psz && *psz ) { val.psz_string = strdup( psz ); input_ControlPush( p_input, INPUT_CONTROL_ADD_SLAVE, &val ); } return VLC_SUCCESS; case INPUT_ADD_SUBTITLE: psz = (char*)va_arg( args, char * ); b_bool = (bool)va_arg( args, int ); if( !psz || *psz == '\0' ) return VLC_EGENERIC; if( b_bool && !subtitles_Filter( psz ) ) return VLC_EGENERIC; val.psz_string = 
strdup( psz ); input_ControlPush( p_input, INPUT_CONTROL_ADD_SUBTITLE, &val ); return VLC_SUCCESS; case INPUT_GET_ATTACHMENTS: /* arg1=input_attachment_t***, arg2=int* res=can fail */ { input_attachment_t ***ppp_attachment = (input_attachment_t***)va_arg( args, input_attachment_t *** ); int *pi_attachment = (int*)va_arg( args, int * ); int i; vlc_mutex_lock( &p_input->p->p_item->lock ); if( p_input->p->i_attachment <= 0 ) { vlc_mutex_unlock( &p_input->p->p_item->lock ); *ppp_attachment = NULL; *pi_attachment = 0; return VLC_EGENERIC; } *pi_attachment = p_input->p->i_attachment; *ppp_attachment = malloc( sizeof(input_attachment_t**) * p_input->p->i_attachment ); for( i = 0; i < p_input->p->i_attachment; i++ ) (*ppp_attachment)[i] = vlc_input_attachment_Duplicate( p_input->p->attachment[i] ); vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; } case INPUT_GET_ATTACHMENT: /* arg1=input_attachment_t**, arg2=char* res=can fail */ { input_attachment_t **pp_attachment = (input_attachment_t**)va_arg( args, input_attachment_t ** ); const char *psz_name = (const char*)va_arg( args, const char * ); int i; vlc_mutex_lock( &p_input->p->p_item->lock ); for( i = 0; i < p_input->p->i_attachment; i++ ) { if( !strcmp( p_input->p->attachment[i]->psz_name, psz_name ) ) { *pp_attachment = vlc_input_attachment_Duplicate( p_input->p->attachment[i] ); vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_SUCCESS; } } *pp_attachment = NULL; vlc_mutex_unlock( &p_input->p->p_item->lock ); return VLC_EGENERIC; } case INPUT_SET_RECORD_STATE: b_bool = (bool)va_arg( args, int ); var_SetBool( p_input, "record", b_bool ); return VLC_SUCCESS; case INPUT_GET_RECORD_STATE: pb_bool = (bool*)va_arg( args, bool* ); *pb_bool = var_GetBool( p_input, "record" ); return VLC_SUCCESS; case INPUT_RESTART_ES: val.i_int = (int)va_arg( args, int ); input_ControlPush( p_input, INPUT_CONTROL_RESTART_ES, &val ); return VLC_SUCCESS; case INPUT_GET_AOUT: { audio_output_t *p_aout = 
input_resource_HoldAout( p_input->p->p_resource ); if( !p_aout ) return VLC_EGENERIC; audio_output_t **pp_aout = (audio_output_t**)va_arg( args, audio_output_t** ); *pp_aout = p_aout; return VLC_SUCCESS; } case INPUT_GET_VOUTS: { vout_thread_t ***ppp_vout = (vout_thread_t***)va_arg( args, vout_thread_t*** ); size_t *pi_vout = va_arg( args, size_t * ); input_resource_HoldVouts( p_input->p->p_resource, ppp_vout, pi_vout ); if( *pi_vout <= 0 ) return VLC_EGENERIC; return VLC_SUCCESS; } case INPUT_GET_ES_OBJECTS: { const int i_id = va_arg( args, int ); vlc_object_t **pp_decoder = va_arg( args, vlc_object_t ** ); vout_thread_t **pp_vout = va_arg( args, vout_thread_t ** ); audio_output_t **pp_aout = va_arg( args, audio_output_t ** ); return es_out_Control( p_input->p->p_es_out_display, ES_OUT_GET_ES_OBJECTS_BY_ID, i_id, pp_decoder, pp_vout, pp_aout ); } case INPUT_GET_PCR_SYSTEM: { mtime_t *pi_system = va_arg( args, mtime_t * ); mtime_t *pi_delay = va_arg( args, mtime_t * ); return es_out_ControlGetPcrSystem( p_input->p->p_es_out_display, pi_system, pi_delay ); } case INPUT_MODIFY_PCR_SYSTEM: { bool b_absolute = va_arg( args, int ); mtime_t i_system = va_arg( args, mtime_t ); return es_out_ControlModifyPcrSystem( p_input->p->p_es_out_display, b_absolute, i_system ); } default: msg_Err( p_input, "unknown query in input_vaControl" ); return VLC_EGENERIC; } } static void UpdateBookmarksOption( input_thread_t *p_input ) { vlc_mutex_lock( &p_input->p->p_item->lock ); /* Update the "bookmark" list */ var_Change( p_input, "bookmark", VLC_VAR_CLEARCHOICES, 0, 0 ); for( int i = 0; i < p_input->p->i_bookmark; i++ ) { vlc_value_t val, text; val.i_int = i; text.psz_string = p_input->p->pp_bookmark[i]->psz_name; var_Change( p_input, "bookmark", VLC_VAR_ADDCHOICE, &val, &text ); } /* Create the "bookmarks" option value */ const char *psz_format = "{name=%s,bytes=%"PRId64",time=%"PRId64"}"; int i_len = strlen( "bookmarks=" ); for( int i = 0; i < p_input->p->i_bookmark; i++ ) { const 
seekpoint_t *p_bookmark = p_input->p->pp_bookmark[i]; i_len += snprintf( NULL, 0, psz_format, p_bookmark->psz_name, p_bookmark->i_byte_offset, p_bookmark->i_time_offset/1000000 ); } char *psz_value = malloc( i_len + p_input->p->i_bookmark + 1 ); char *psz_next = psz_value; psz_next += sprintf( psz_next, "bookmarks=" ); for( int i = 0; i < p_input->p->i_bookmark && psz_value != NULL; i++ ) { const seekpoint_t *p_bookmark = p_input->p->pp_bookmark[i]; psz_next += sprintf( psz_next, psz_format, p_bookmark->psz_name, p_bookmark->i_byte_offset, p_bookmark->i_time_offset/1000000 ); if( i < p_input->p->i_bookmark - 1) *psz_next++ = ','; } vlc_mutex_unlock( &p_input->p->p_item->lock ); if( psz_value ) input_item_AddOption( p_input->p->p_item, psz_value, VLC_INPUT_OPTION_UNIQUE ); free( psz_value ); input_SendEventBookmark( p_input ); }
gpl-2.0
teamfx/openjfx-8u-dev-rt
modules/media/src/main/native/jfxmedia/MediaManagement/MediaManager.cpp
4
8099
/* * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ #include "MediaManager.h" #include <Common/ProductFlags.h> #include <Common/VSMemory.h> #include <PipelineManagement/PipelineFactory.h> #include <Locator/Locator.h> #include <platform/gstreamer/GstMediaManager.h> #include <Utils/JfxCriticalSection.h> #include <jfxmedia_errors.h> #if TARGET_OS_WIN32 #include <Utils/win32/WinExceptionHandler.h> #endif CMediaManager::MMSingleton CMediaManager::s_Singleton; //************************************************************************************************* //********** Empty content types list in case PipelineFactory is not available. 
//************************************************************************************************* const static ContentTypesList EMPTY_LIST; //************************************************************************************************* //********** class CMediaManager //************************************************************************************************* CMediaManager::CMediaManager() : m_uInternalError(ERROR_NONE) {} CMediaManager::~CMediaManager() {} /** * CMediaManager::GetInstance() * * @return CMediaManager* singleton */ uint32_t CMediaManager::GetInstance(CMediaManager** ppMediaManager) { return s_Singleton.GetInstance(ppMediaManager); } /** * CMediaManager::CreateInstance() creates an instance of the class * This method is used by Singleton class to create the actual instace of a class. * When the method is protected of private, Singleton class should be a friend for the class. * * @return CMediaManager* instance */ uint32_t CMediaManager::CreateInstance(CMediaManager** ppMediaManager) { #if ENABLE_PLATFORM_GSTREAMER #if !defined(TARGET_OS_WIN32) && !defined(TARGET_OS_MAC) && !defined(TARGET_OS_LINUX) return ERROR_OS_UNSUPPORTED; #else #if TARGET_OS_WIN32 SetExceptionHandler(); #endif CGstMediaManager* pGstManager = new(nothrow) CGstMediaManager(); if (NULL == pGstManager) return ERROR_MEMORY_ALLOCATION; if (ERROR_NONE != (pGstManager->m_uInternalError = pGstManager->Init())) return ERROR_MANAGER_CREATION; *ppMediaManager = pGstManager; return ERROR_NONE; #endif // !defined ... #else return ERROR_PLATFORM_UNSUPPORTED; #endif //ENABLE_PLATFORM_GSTREAMER } /** * CMediaManager::SetWarningListener(const CMediaWarningListener* pWarningListener) * * Sets the listener to receive notifications of warnings * which are not specific to a given pipeline. * * @param The listener. 
*/ void CMediaManager::SetWarningListener(CMediaWarningListener* pWarningListener) { m_pWarningListener = pWarningListener; } bool CMediaManager::CanPlayContentType(string contentType) { CPipelineFactory* pPipelineFactory = NULL; uint32_t uRetCode; uRetCode = CPipelineFactory::GetInstance(&pPipelineFactory); if (ERROR_NONE != uRetCode) return false; else if (NULL == pPipelineFactory) return false; return pPipelineFactory->CanPlayContentType(contentType); } const ContentTypesList& CMediaManager::GetSupportedContentTypes() { CPipelineFactory* pPipelineFactory = NULL; uint32_t uRetCode; uRetCode = CPipelineFactory::GetInstance(&pPipelineFactory); if (ERROR_NONE != uRetCode) return EMPTY_LIST; else if (NULL == pPipelineFactory) return EMPTY_LIST; return pPipelineFactory->GetSupportedContentTypes(); } /** * CMediaManager::CreatePlayer(CLocator locator) * * @param locator * * @return Pointer to a new CMedia object. */ uint32_t CMediaManager::CreatePlayer(CLocator* pLocator, CPipelineOptions* pOptions, CMedia** ppMedia) { CPipeline* pPipeline = NULL; CPipelineFactory* pPipelineFactory = NULL; uint32_t uRetCode; if (NULL == pLocator) return ERROR_LOCATOR_NULL; uRetCode = CPipelineFactory::GetInstance(&pPipelineFactory); if (ERROR_NONE != uRetCode) return uRetCode; else if (NULL == pPipelineFactory) return ERROR_FACTORY_NULL; //***** Initialize the return value *ppMedia = NULL; //***** If we have a null option object, create one if (NULL == pOptions) { pOptions = new (nothrow)CPipelineOptions(); if (NULL == pOptions) return ERROR_MEMORY_ALLOCATION; } //***** Try to create a pipeline uRetCode = pPipelineFactory->CreatePlayerPipeline(pLocator, pOptions, &pPipeline); //***** Create the new CMedia object if (ERROR_NONE == uRetCode) { //***** Try to create a CMedia to associate with the pipeline *ppMedia = new(nothrow) CMedia(pPipeline); if (NULL == *ppMedia) { //Cleanup if media creation failed. 
delete pPipeline; uRetCode = ERROR_MEDIA_CREATION; } } return uRetCode; } /** * CMediaManager::CreateMedia(CLocator locator) * * Creates a media object, given a locator and a set of options. * * @param pLocator pointer to a CLocator object * @param pOptions pointer to a CPipelienOptions object * * @return Pointer to a new CMedia object. */ uint32_t CMediaManager::CreateMedia(CLocator* pLocator, CPipelineOptions* pOptions, CMedia** ppMedia) { CPipeline* pPipeline = NULL; CPipelineFactory* pPipelineFactory = NULL; uint32_t uRetCode; if (NULL == pLocator) return ERROR_LOCATOR_NULL; uRetCode = CPipelineFactory::GetInstance(&pPipelineFactory); if (ERROR_NONE != uRetCode) return uRetCode; else if (NULL == pPipelineFactory) return ERROR_FACTORY_NULL; //***** Initialize the return value *ppMedia = NULL; //***** If we have a null option object, create one if (NULL == pOptions) { pOptions = new (nothrow) CPipelineOptions(); if (NULL == pOptions) return ERROR_MEMORY_ALLOCATION; } //***** Do the real work if ((CPipelineOptions::kAudioPlaybackPipeline == pOptions->GetPipelineType()) || (CPipelineOptions::kAVPlaybackPipeline == pOptions->GetPipelineType())) { //***** Create a player pipleine first #if JFXMEDIA_DEBUG printf("-- CreateMedia : create player pipeline\n"); #endif uRetCode = pPipelineFactory->CreatePlayerPipeline(pLocator, pOptions, &pPipeline); //***** Create the new CMedia object if (ERROR_NONE == uRetCode) { //***** Create a media object and attach the pipeline to the media object *ppMedia = new(nothrow) CMedia(pPipeline); if (NULL == *ppMedia) { //Cleanup if media creation failed. delete pPipeline; uRetCode = ERROR_MEDIA_CREATION; } } } #if JFXMEDIA_DEBUG printf("-- CreateMedia : finish\n"); #endif return uRetCode; }
gpl-2.0
WildfireDEV/android_kernel_htc_msm8960
arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
4
4818
/* amrnb audio output device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "audio_utils_aio.h" static void q6_audio_amrnb_cb(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct q6audio_aio *audio = (struct q6audio_aio *)priv; pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE: case ASM_DATA_EVENT_READ_DONE: case ASM_DATA_CMDRSP_EOS: audio_aio_cb(opcode, token, payload, audio); break; default: pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); break; } } #ifdef CONFIG_DEBUG_FS static const struct file_operations audio_amrnb_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, }; #endif static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct q6audio_aio *audio = file->private_data; int rc = 0; switch (cmd) { case AUDIO_START: { pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ rc = q6asm_enc_cfg_blk_pcm(audio->ac, audio->pcm_cfg.sample_rate, audio->pcm_cfg.channel_count); if (rc < 0) { pr_err("pcm output block config failed\n"); break; } } rc = audio_aio_enable(audio); audio->eos_rsp = 0; audio->eos_flag = 0; if (!rc) { audio->enabled = 1; } else { audio->enabled = 0; pr_err("Audio Start procedure failed rc=%d\n", rc); break; } pr_debug("AUDIO_START success 
enable[%d]\n", audio->enabled); if (audio->stopped == 1) audio->stopped = 0; break; } default: pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; } static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_amrnb_" + 5]; #endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (audio == NULL) { pr_err("Could not allocate memory for wma decode driver\n"); return -ENOMEM; } audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_amrnb_cb, (void *)audio); if (!audio->ac) { pr_err("Could not allocate memory for audio client\n"); kfree(audio); return -ENOMEM; } /* open in T/NT mode */ if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, FORMAT_AMRNB); if (rc < 0) { pr_err("NT mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = NON_TUNNEL_MODE; audio->buf_cfg.frames_per_buf = 0x01; audio->buf_cfg.meta_info_enable = 0x01; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { rc = q6asm_open_write(audio->ac, FORMAT_AMRNB); if (rc < 0) { pr_err("T mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = TUNNEL_MODE; audio->buf_cfg.meta_info_enable = 0x00; } else { pr_err("Not supported mode\n"); rc = -EACCES; goto fail; } rc = audio_aio_open(audio, file); if (IS_ERR_VALUE(rc)) { pr_err("%s: audio_aio_open failed\n", __func__); goto fail; } #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_amrnb_%04x", audio->ac->session); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *)audio, &audio_amrnb_debug_fops); if (IS_ERR(audio->dentry)) pr_debug("debugfs_create_file failed\n"); #endif pr_info("%s:amrnb decoder open success, session_id = 
%d\n", __func__, audio->ac->session); return rc; fail: q6asm_audio_client_free(audio->ac); kfree(audio); return rc; } static const struct file_operations audio_amrnb_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_aio_release, .unlocked_ioctl = audio_ioctl, .fsync = audio_aio_fsync, }; struct miscdevice audio_amrnb_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrnb", .fops = &audio_amrnb_fops, }; static int __init audio_amrnb_init(void) { return misc_register(&audio_amrnb_misc); } device_initcall(audio_amrnb_init);
gpl-2.0
Thrive-Hackers/tostab3-gnu-linux-kernel
drivers/acpi/acpica/evmisc.c
260
18555
/****************************************************************************** * * Module Name: evmisc - Miscellaneous event manager support functions * *****************************************************************************/ /* * Copyright (C) 2000 - 2011, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evmisc") /* Local prototypes */ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); static u32 acpi_ev_global_lock_handler(void *context); static acpi_status acpi_ev_remove_global_lock_handler(void); /******************************************************************************* * * FUNCTION: acpi_ev_is_notify_object * * PARAMETERS: Node - Node to check * * RETURN: TRUE if notifies allowed on this object * * DESCRIPTION: Check type of node for a object that supports notifies. * * TBD: This could be replaced by a flag bit in the node. * ******************************************************************************/ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node) { switch (node->type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: /* * These are the ONLY objects that can receive ACPI notifications */ return (TRUE); default: return (FALSE); } } /******************************************************************************* * * FUNCTION: acpi_ev_queue_notify_request * * PARAMETERS: Node - NS node for the notified object * notify_value - Value from the Notify() request * * RETURN: Status * * DESCRIPTION: Dispatch a device notification event to a previously * installed handler. 
* ******************************************************************************/ acpi_status acpi_ev_queue_notify_request(struct acpi_namespace_node * node, u32 notify_value) { union acpi_operand_object *obj_desc; union acpi_operand_object *handler_obj = NULL; union acpi_generic_state *notify_info; acpi_status status = AE_OK; ACPI_FUNCTION_NAME(ev_queue_notify_request); /* * For value 3 (Ejection Request), some device method may need to be run. * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need * to be run. * For value 0x80 (Status Change) on the power button or sleep button, * initiate soft-off or sleep operation? */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n", acpi_ut_get_node_name(node), node, notify_value, acpi_ut_get_notify_name(notify_value))); /* Get the notify object attached to the NS Node */ obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { /* We have the notify object, Get the right handler */ switch (node->type) { /* Notify allowed only on these types */ case ACPI_TYPE_DEVICE: case ACPI_TYPE_THERMAL: case ACPI_TYPE_PROCESSOR: if (notify_value <= ACPI_MAX_SYS_NOTIFY) { handler_obj = obj_desc->common_notify.system_notify; } else { handler_obj = obj_desc->common_notify.device_notify; } break; default: /* All other types are not supported */ return (AE_TYPE); } } /* * If there is any handler to run, schedule the dispatcher. 
* Check for: * 1) Global system notify handler * 2) Global device notify handler * 3) Per-device notify handler */ if ((acpi_gbl_system_notify.handler && (notify_value <= ACPI_MAX_SYS_NOTIFY)) || (acpi_gbl_device_notify.handler && (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) { notify_info = acpi_ut_create_generic_state(); if (!notify_info) { return (AE_NO_MEMORY); } if (!handler_obj) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Executing system notify handler for Notify (%4.4s, %X) " "node %p\n", acpi_ut_get_node_name(node), notify_value, node)); } notify_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY; notify_info->notify.node = node; notify_info->notify.value = (u16) notify_value; notify_info->notify.handler_obj = handler_obj; status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, notify_info); if (ACPI_FAILURE(status)) { acpi_ut_delete_generic_state(notify_info); } } else { /* There is no notify handler (per-device or system) for this device */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No notify handler for Notify (%4.4s, %X) node %p\n", acpi_ut_get_node_name(node), notify_value, node)); } return (status); } /******************************************************************************* * * FUNCTION: acpi_ev_notify_dispatch * * PARAMETERS: Context - To be passed to the notify handler * * RETURN: None. * * DESCRIPTION: Dispatch a device notification event to a previously * installed handler. * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) { union acpi_generic_state *notify_info = (union acpi_generic_state *)context; acpi_notify_handler global_handler = NULL; void *global_context = NULL; union acpi_operand_object *handler_obj; ACPI_FUNCTION_ENTRY(); /* * We will invoke a global notify handler if installed. This is done * _before_ we invoke the per-device handler attached to the device. 
*/ if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) { /* Global system notification handler */ if (acpi_gbl_system_notify.handler) { global_handler = acpi_gbl_system_notify.handler; global_context = acpi_gbl_system_notify.context; } } else { /* Global driver notification handler */ if (acpi_gbl_device_notify.handler) { global_handler = acpi_gbl_device_notify.handler; global_context = acpi_gbl_device_notify.context; } } /* Invoke the system handler first, if present */ if (global_handler) { global_handler(notify_info->notify.node, notify_info->notify.value, global_context); } /* Now invoke the per-device handler, if present */ handler_obj = notify_info->notify.handler_obj; if (handler_obj) { struct acpi_object_notify_handler *notifier; notifier = &handler_obj->notify; while (notifier) { notifier->handler(notify_info->notify.node, notify_info->notify.value, notifier->context); notifier = notifier->next; } } /* All done with the info object */ acpi_ut_delete_generic_state(notify_info); } /******************************************************************************* * * FUNCTION: acpi_ev_global_lock_handler * * PARAMETERS: Context - From thread interface, not used * * RETURN: ACPI_INTERRUPT_HANDLED * * DESCRIPTION: Invoked directly from the SCI handler when a global lock * release interrupt occurs. If there's a thread waiting for * the global lock, signal it. * * NOTE: Assumes that the semaphore can be signaled from interrupt level. If * this is not possible for some reason, a separate thread will have to be * scheduled to do this. 
* ******************************************************************************/ static u8 acpi_ev_global_lock_pending; static u32 acpi_ev_global_lock_handler(void *context) { acpi_status status; acpi_cpu_flags flags; flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); if (!acpi_ev_global_lock_pending) { goto out; } /* Send a unit to the semaphore */ status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); } acpi_ev_global_lock_pending = FALSE; out: acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); return (ACPI_INTERRUPT_HANDLED); } /******************************************************************************* * * FUNCTION: acpi_ev_init_global_lock_handler * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Install a handler for the global lock release event * ******************************************************************************/ acpi_status acpi_ev_init_global_lock_handler(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); /* Attempt installation of the global lock handler */ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, acpi_ev_global_lock_handler, NULL); /* * If the global lock does not exist on this platform, the attempt to * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick). * Map to AE_OK, but mark global lock as not present. Any attempt to * actually use the global lock will be flagged with an error. 
*/ if (status == AE_NO_HARDWARE_RESPONSE) { ACPI_ERROR((AE_INFO, "No response from Global Lock hardware, disabling lock")); acpi_gbl_global_lock_present = FALSE; return_ACPI_STATUS(AE_OK); } acpi_gbl_global_lock_present = TRUE; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_remove_global_lock_handler * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Remove the handler for the Global Lock * ******************************************************************************/ static acpi_status acpi_ev_remove_global_lock_handler(void) { acpi_status status; ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler); acpi_gbl_global_lock_present = FALSE; status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL, acpi_ev_global_lock_handler); return_ACPI_STATUS(status); } /****************************************************************************** * * FUNCTION: acpi_ev_acquire_global_lock * * PARAMETERS: Timeout - Max time to wait for the lock, in millisec. * * RETURN: Status * * DESCRIPTION: Attempt to gain ownership of the Global Lock. * * MUTEX: Interpreter must be locked * * Note: The original implementation allowed multiple threads to "acquire" the * Global Lock, and the OS would hold the lock until the last thread had * released it. However, this could potentially starve the BIOS out of the * lock, especially in the case where there is a tight handshake between the * Embedded Controller driver and the BIOS. Therefore, this implementation * allows only one thread to acquire the HW Global Lock at a time, and makes * the global lock appear as a standard mutex on the OS side. 
* *****************************************************************************/ static acpi_thread_id acpi_ev_global_lock_thread_id; static int acpi_ev_global_lock_acquired; acpi_status acpi_ev_acquire_global_lock(u16 timeout) { acpi_cpu_flags flags; acpi_status status = AE_OK; u8 acquired = FALSE; ACPI_FUNCTION_TRACE(ev_acquire_global_lock); /* * Only one thread can acquire the GL at a time, the global_lock_mutex * enforces this. This interface releases the interpreter if we must wait. */ status = acpi_ex_system_wait_mutex( acpi_gbl_global_lock_mutex->mutex.os_mutex, 0); if (status == AE_TIME) { if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) { acpi_ev_global_lock_acquired++; return AE_OK; } } if (ACPI_FAILURE(status)) { status = acpi_ex_system_wait_mutex( acpi_gbl_global_lock_mutex->mutex.os_mutex, timeout); } if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } acpi_ev_global_lock_thread_id = acpi_os_get_thread_id(); acpi_ev_global_lock_acquired++; /* * Update the global lock handle and check for wraparound. The handle is * only used for the external global lock interfaces, but it is updated * here to properly handle the case where a single thread may acquire the * lock via both the AML and the acpi_acquire_global_lock interfaces. The * handle is therefore updated on the first acquire from a given thread * regardless of where the acquisition request originated. */ acpi_gbl_global_lock_handle++; if (acpi_gbl_global_lock_handle == 0) { acpi_gbl_global_lock_handle = 1; } /* * Make sure that a global lock actually exists. If not, just treat the * lock as a standard mutex. 
*/ if (!acpi_gbl_global_lock_present) { acpi_gbl_global_lock_acquired = TRUE; return_ACPI_STATUS(AE_OK); } flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); do { /* Attempt to acquire the actual hardware lock */ ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); if (acquired) { acpi_gbl_global_lock_acquired = TRUE; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Acquired hardware Global Lock\n")); break; } acpi_ev_global_lock_pending = TRUE; acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); /* * Did not get the lock. The pending bit was set above, and we * must wait until we get the global lock released interrupt. */ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); /* * Wait for handshake with the global lock interrupt handler. * This interface releases the interpreter if we must wait. */ status = acpi_ex_system_wait_semaphore( acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER); flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); } while (ACPI_SUCCESS(status)); acpi_ev_global_lock_pending = FALSE; acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_release_global_lock * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Releases ownership of the Global Lock. 
* ******************************************************************************/ acpi_status acpi_ev_release_global_lock(void) { u8 pending = FALSE; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(ev_release_global_lock); /* Lock must be already acquired */ if (!acpi_gbl_global_lock_acquired) { ACPI_WARNING((AE_INFO, "Cannot release the ACPI Global Lock, it has not been acquired")); return_ACPI_STATUS(AE_NOT_ACQUIRED); } acpi_ev_global_lock_acquired--; if (acpi_ev_global_lock_acquired > 0) { return AE_OK; } if (acpi_gbl_global_lock_present) { /* Allow any thread to release the lock */ ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending); /* * If the pending bit was set, we must write GBL_RLS to the control * register */ if (pending) { status = acpi_write_bit_register (ACPI_BITREG_GLOBAL_LOCK_RELEASE, ACPI_ENABLE_EVENT); } ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Released hardware Global Lock\n")); } acpi_gbl_global_lock_acquired = FALSE; /* Release the local GL mutex */ acpi_ev_global_lock_thread_id = 0; acpi_ev_global_lock_acquired = 0; acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex); return_ACPI_STATUS(status); } /****************************************************************************** * * FUNCTION: acpi_ev_terminate * * PARAMETERS: none * * RETURN: none * * DESCRIPTION: Disable events and free memory allocated for table storage. * ******************************************************************************/ void acpi_ev_terminate(void) { u32 i; acpi_status status; ACPI_FUNCTION_TRACE(ev_terminate); if (acpi_gbl_events_initialized) { /* * Disable all event-related functionality. In all cases, on error, * print a message but obviously we don't abort. 
*/ /* Disable all fixed events */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { status = acpi_disable_event(i, 0); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not disable fixed event %u", (u32) i)); } } /* Disable all GPEs in all GPE blocks */ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); /* Remove SCI handler */ status = acpi_ev_remove_sci_handler(); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); } status = acpi_ev_remove_global_lock_handler(); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not remove Global Lock handler")); } } /* Deallocate all handler objects installed within GPE info structs */ status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); /* Return to original mode if necessary */ if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { status = acpi_disable(); if (ACPI_FAILURE(status)) { ACPI_WARNING((AE_INFO, "AcpiDisable failed")); } } return_VOID; }
gpl-2.0
mdeejay/android_kernel_lge_x3
drivers/net/usb/dm9601.c
260
16318
/* * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices * * Peter Korsgaard <jacmet@sunsite.dk> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ //#define DEBUG #include <linux/module.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include <linux/slab.h> /* datasheet: http://ptm2.cc.utu.fi/ftp/network/cards/DM9601/From_NET/DM9601-DS-P01-930914.pdf */ /* control requests */ #define DM_READ_REGS 0x00 #define DM_WRITE_REGS 0x01 #define DM_READ_MEMS 0x02 #define DM_WRITE_REG 0x03 #define DM_WRITE_MEMS 0x05 #define DM_WRITE_MEM 0x07 /* registers */ #define DM_NET_CTRL 0x00 #define DM_RX_CTRL 0x05 #define DM_SHARED_CTRL 0x0b #define DM_SHARED_ADDR 0x0c #define DM_SHARED_DATA 0x0d /* low + high */ #define DM_PHY_ADDR 0x10 /* 6 bytes */ #define DM_MCAST_ADDR 0x16 /* 8 bytes */ #define DM_GPR_CTRL 0x1e #define DM_GPR_DATA 0x1f #define DM_MAX_MCAST 64 #define DM_MCAST_SIZE 8 #define DM_EEPROM_LEN 256 #define DM_TX_OVERHEAD 2 /* 2 byte header */ #define DM_RX_OVERHEAD 7 /* 3 byte header + 4 byte crc tail */ #define DM_TIMEOUT 1000 static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) { void *buf; int err = -ENOMEM; netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length); buf = kmalloc(length, GFP_KERNEL); if (!buf) goto out; err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), DM_READ_REGS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, reg, buf, length, USB_CTRL_SET_TIMEOUT); if (err == length) memcpy(data, buf, length); else if (err >= 0) err = -EINVAL; kfree(buf); out: return err; } static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value) { return dm_read(dev, reg, 1, 
value); }
/* NOTE(review): the tokens above are the tail of a read helper (dm_read_reg)
 * whose start lies outside this chunk. */

/* Write 'length' bytes from 'data' to device register block starting at
 * 'reg' via a synchronous vendor control transfer.  Duplicates the caller's
 * buffer so the USB core gets a DMA-safe copy.  Returns negative errno on
 * failure (a short write is reported as -EINVAL). */
static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);

	if (data) {
		buf = kmemdup(data, length, GFP_KERNEL);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev,
			      usb_sndctrlpipe(dev->udev, 0),
			      DM_WRITE_REGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
	kfree(buf);
	/* treat a short transfer as an error */
	if (err >= 0 && err < length)
		err = -EINVAL;
out:
	return err;
}

/* Write a single byte 'value' to register 'reg' (no data stage needed:
 * the value travels in wValue of the setup packet). */
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
	netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
		   reg, value);
	return usb_control_msg(dev->udev,
			       usb_sndctrlpipe(dev->udev, 0),
			       DM_WRITE_REG,
			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
}

/* URB completion callback for the async write helpers: logs failures and
 * frees the request and the URB (both allocated in dm_write_async_helper). */
static void dm_write_async_callback(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
		       status);

	kfree(req);
	usb_free_urb(urb);
}

/* Submit a fire-and-forget vendor control write.  length == 0 selects the
 * single-byte DM_WRITE_REG form (value in wValue); length > 0 selects
 * DM_WRITE_REGS with 'data' as the payload.  Usable from atomic context
 * (GFP_ATOMIC allocations); cleanup happens in the completion callback. */
static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
				  u16 length, void *data)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int status;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
		return;
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req) {
		netdev_err(dev->net, "Failed to allocate memory for control request\n");
		usb_free_urb(urb);
		return;
	}

	req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	req->bRequest = length ? DM_WRITE_REGS : DM_WRITE_REG;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(reg);
	req->wLength = cpu_to_le16(length);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, data, length,
			     dm_write_async_callback, req);

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status < 0) {
		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
			   status);
		kfree(req);
		usb_free_urb(urb);
	}
}

/* Asynchronous multi-byte register write (no completion reporting). */
static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
	netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n",
		   reg, length);

	dm_write_async_helper(dev, reg, 0, length, data);
}

/* Asynchronous single-byte register write (no completion reporting). */
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
	netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
		   reg, value);

	dm_write_async_helper(dev, reg, value, 0, NULL);
}

/* Read one 16-bit word from the shared PHY/EEPROM interface.
 * phy != 0 selects the PHY (reg | 0x40, ctrl 0xc); phy == 0 selects the
 * EEPROM (ctrl 0x4).  Polls the busy bit up to DM_TIMEOUT iterations.
 * Serialized against writers by dev->phy_mutex. */
static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
{
	int ret, i;

	mutex_lock(&dev->phy_mutex);

	dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0xc : 0x4);

	for (i = 0; i < DM_TIMEOUT; i++) {
		u8 tmp;

		udelay(1);
		ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
		if (ret < 0)
			goto out;

		/* ready */
		if ((tmp & 1) == 0)
			break;
	}

	if (i == DM_TIMEOUT) {
		netdev_err(dev->net, "%s read timed out!\n",
			   phy ? "phy" : "eeprom");
		ret = -EIO;
		goto out;
	}

	dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
	ret = dm_read(dev, DM_SHARED_DATA, 2, value);

	netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
		   phy, reg, *value, ret);

out:
	mutex_unlock(&dev->phy_mutex);
	return ret;
}

/* Write one 16-bit word to the shared PHY/EEPROM interface; mirror image
 * of dm_read_shared_word() (ctrl 0x1a for PHY, 0x12 for EEPROM). */
static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 value)
{
	int ret, i;

	mutex_lock(&dev->phy_mutex);

	ret = dm_write(dev, DM_SHARED_DATA, 2, &value);
	if (ret < 0)
		goto out;

	dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
	dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);

	for (i = 0; i < DM_TIMEOUT; i++) {
		u8 tmp;

		udelay(1);
		ret = dm_read_reg(dev, DM_SHARED_CTRL, &tmp);
		if (ret < 0)
			goto out;

		/* ready */
		if ((tmp & 1) == 0)
			break;
	}

	if (i == DM_TIMEOUT) {
		netdev_err(dev->net, "%s write timed out!\n",
			   phy ? "phy" : "eeprom");
		ret = -EIO;
		goto out;
	}

	dm_write_reg(dev, DM_SHARED_CTRL, 0x0);

out:
	mutex_unlock(&dev->phy_mutex);
	return ret;
}

/* Read one EEPROM word (16 bits) at word offset 'offset'. */
static int dm_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
{
	return dm_read_shared_word(dev, 0, offset, value);
}

/* ethtool: report EEPROM size in bytes. */
static int dm9601_get_eeprom_len(struct net_device *dev)
{
	return DM_EEPROM_LEN;
}

/* ethtool: dump EEPROM contents.  Access granularity is 16 bits, so both
 * offset and length must be even. */
static int dm9601_get_eeprom(struct net_device *net,
			     struct ethtool_eeprom *eeprom, u8 * data)
{
	struct usbnet *dev = netdev_priv(net);
	__le16 *ebuf = (__le16 *) data;
	int i;

	/* access is 16bit */
	if ((eeprom->offset % 2) || (eeprom->len % 2))
		return -EINVAL;

	for (i = 0; i < eeprom->len / 2; i++) {
		if (dm_read_eeprom_word(dev, eeprom->offset / 2 + i,
					&ebuf[i]) < 0)
			return -EINVAL;
	}
	return 0;
}

/* MII read callback.  Only the internal PHY (phy_id 0) is supported;
 * other ids read as 0.  Errors from the shared-word read are ignored
 * and res may then hold stale data -- NOTE(review): verify acceptable. */
static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
	struct usbnet *dev = netdev_priv(netdev);

	__le16 res;

	if (phy_id) {
		netdev_dbg(dev->net, "Only internal phy supported\n");
		return 0;
	}

	dm_read_shared_word(dev, 1, loc, &res);

	netdev_dbg(dev->net,
		   "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
		   phy_id, loc, le16_to_cpu(res));

	return le16_to_cpu(res);
}

/* MII write callback; internal PHY only, silently ignores other ids. */
static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
			      int val)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res = cpu_to_le16(val);

	if (phy_id) {
		netdev_dbg(dev->net, "Only internal phy supported\n");
		return;
	}

	netdev_dbg(dev->net, "dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
		   phy_id, loc, val);

	dm_write_shared_word(dev, 1, loc, res);
}

/* ethtool: driver info plus EEPROM dump length. */
static void dm9601_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	/* Inherit standard device info */
	usbnet_get_drvinfo(net, info);
	info->eedump_len = DM_EEPROM_LEN;
}

/* ethtool: link state straight from the MII library. */
static u32 dm9601_get_link(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return mii_link_ok(&dev->mii);
}

/* SIOCGMIIx/SIOCSMIIx ioctls, delegated to the generic MII helper. */
static int dm9601_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
{
	struct usbnet *dev = netdev_priv(net);

	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}

static const struct ethtool_ops dm9601_ethtool_ops = {
	.get_drvinfo	= dm9601_get_drvinfo,
	.get_link	= dm9601_get_link,
	.get_msglevel	= usbnet_get_msglevel,
	.set_msglevel	= usbnet_set_msglevel,
	.get_eeprom_len	= dm9601_get_eeprom_len,
	.get_eeprom	= dm9601_get_eeprom,
	.get_settings	= usbnet_get_settings,
	.set_settings	= usbnet_set_settings,
	.nway_reset	= usbnet_nway_reset,
};

/* Program the RX filter: always accept broadcast; add promiscuous or
 * all-multi bits per interface flags, otherwise build the 64-bit CRC
 * hash table from the multicast list.  Uses async writes because this
 * can be called under the addr-list spinlock. */
static void dm9601_set_multicast(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);
	/* We use the 20 byte dev->data for our 8 byte filter buffer
	 * to avoid allocating memory that is tricky to free later */
	u8 *hashes = (u8 *) & dev->data;
	u8 rx_ctl = 0x31;

	memset(hashes, 0x00, DM_MCAST_SIZE);
	hashes[DM_MCAST_SIZE - 1] |= 0x80;	/* broadcast address */

	if (net->flags & IFF_PROMISC) {
		rx_ctl |= 0x02;
	} else if (net->flags & IFF_ALLMULTI ||
		   netdev_mc_count(net) > DM_MAX_MCAST) {
		rx_ctl |= 0x04;
	} else if (!netdev_mc_empty(net)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, net) {
			u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
			hashes[crc >> 3] |= 1 << (crc & 0x7);
		}
	}

	dm_write_async(dev, DM_MCAST_ADDR, DM_MCAST_SIZE, hashes);
	dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
}

/* Push the current dev_addr into the chip's PHY address registers. */
static void __dm9601_set_mac_address(struct usbnet *dev)
{
	dm_write_async(dev, DM_PHY_ADDR, ETH_ALEN, dev->net->dev_addr);
}

/* ndo_set_mac_address: validate, copy into dev_addr, program hardware. */
static int dm9601_set_mac_address(struct net_device *net, void *p)
{
	struct sockaddr *addr = p;
	struct usbnet *dev = netdev_priv(net);

	if (!is_valid_ether_addr(addr->sa_data)) {
		dev_err(&net->dev, "not setting invalid mac address %pM\n",
			addr->sa_data);
		return -EINVAL;
	}

	memcpy(net->dev_addr, addr->sa_data, net->addr_len);
	__dm9601_set_mac_address(dev);

	return 0;
}

static const struct net_device_ops dm9601_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= dm9601_ioctl,
	.ndo_set_multicast_list	= dm9601_set_multicast,
	.ndo_set_mac_address	= dm9601_set_mac_address,
};

/* usbnet bind callback: discover endpoints, size the buffers for the
 * 2-byte TX / 7-byte RX hardware framing, reset the chip, fetch (or
 * synthesize) the MAC address, power up the PHY and kick autonegotiation. */
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int ret;
	u8 mac[ETH_ALEN];

	ret = usbnet_get_endpoints(dev, intf);
	if (ret)
		goto out;

	dev->net->netdev_ops = &dm9601_netdev_ops;
	dev->net->ethtool_ops = &dm9601_ethtool_ops;
	dev->net->hard_header_len += DM_TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
	dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;

	dev->mii.dev = dev->net;
	dev->mii.mdio_read = dm9601_mdio_read;
	dev->mii.mdio_write = dm9601_mdio_write;
	dev->mii.phy_id_mask = 0x1f;
	dev->mii.reg_num_mask = 0x1f;

	/* reset */
	dm_write_reg(dev, DM_NET_CTRL, 1);
	udelay(20);

	/* read MAC */
	if (dm_read(dev, DM_PHY_ADDR, ETH_ALEN, mac) < 0) {
		printk(KERN_ERR "Error reading MAC address\n");
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Overwrite the auto-generated address only with good ones.
	 */
	if (is_valid_ether_addr(mac))
		memcpy(dev->net->dev_addr, mac, ETH_ALEN);
	else {
		printk(KERN_WARNING
		       "dm9601: No valid MAC address in EEPROM, using %pM\n",
		       dev->net->dev_addr);
		__dm9601_set_mac_address(dev);
	}

	/* power up phy */
	dm_write_reg(dev, DM_GPR_CTRL, 1);
	dm_write_reg(dev, DM_GPR_DATA, 0);

	/* receive broadcast packets */
	dm9601_set_multicast(dev->net);

	dm9601_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
	dm9601_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
			  ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
	mii_nway_restart(&dev->mii);

out:
	return ret;
}

/* Strip the 3-byte hardware RX header and the trailing CRC; account
 * status-byte error bits.  Returns 1 to keep the frame, 0 to drop it.
 * NOTE(review): 'len' comes from the device header and is not validated
 * against skb->len; skb_trim() only shrinks, but confirm a corrupt
 * length cannot mislead upper layers. */
static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	u8 status;
	int len;

	/* format:
	   b1: rx status
	   b2: packet length (incl crc) low
	   b3: packet length (incl crc) high
	   b4..n-4: packet data
	   bn-3..bn: ethernet crc
	 */

	if (unlikely(skb->len < DM_RX_OVERHEAD)) {
		dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
		return 0;
	}

	status = skb->data[0];
	len = (skb->data[1] | (skb->data[2] << 8)) - 4;

	if (unlikely(status & 0xbf)) {
		if (status & 0x01) dev->net->stats.rx_fifo_errors++;
		if (status & 0x02) dev->net->stats.rx_crc_errors++;
		if (status & 0x04) dev->net->stats.rx_frame_errors++;
		if (status & 0x20) dev->net->stats.rx_missed_errors++;
		if (status & 0x90) dev->net->stats.rx_length_errors++;
		return 0;
	}

	skb_pull(skb, 3);
	skb_trim(skb, len);

	return 1;
}

/* Prepend the 2-byte little-endian length header the chip expects,
 * reallocating when headroom is short.  Bumps the length field when
 * usbnet will pad the URB to a packet-size multiple. */
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				       gfp_t flags)
{
	int len;

	/* format:
	   b1: packet length low
	   b2: packet length high
	   b3..n: packet data
	*/

	len = skb->len;

	if (skb_headroom(skb) < DM_TX_OVERHEAD) {
		struct sk_buff *skb2;

		skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	__skb_push(skb, DM_TX_OVERHEAD);

	/* usbnet adds padding if length is a multiple of packet size
	   if so, adjust length value in header */
	if ((skb->len % dev->maxpacket) == 0)
		len++;

	skb->data[0] = len;
	skb->data[1] = len >> 8;

	return skb;
}

/* Interrupt-endpoint status callback: track link state from bit 6 of the
 * net-status byte and schedule a link reset when it comes up. */
static void dm9601_status(struct usbnet *dev, struct urb *urb)
{
	int link;
	u8 *buf;

	/* format:
	   b0: net status
	   b1: tx status 1
	   b2: tx status 2
	   b3: rx status
	   b4: rx overflow
	   b5: rx count
	   b6: tx count
	   b7: gpr
	*/

	if (urb->actual_length < 8)
		return;

	buf = urb->transfer_buffer;

	link = !!(buf[0] & 0x40);
	if (netif_carrier_ok(dev->net) != link) {
		if (link) {
			netif_carrier_on(dev->net);
			usbnet_defer_kevent (dev, EVENT_LINK_RESET);
		} else
			netif_carrier_off(dev->net);
		netdev_dbg(dev->net, "Link Status is: %d\n", link);
	}
}

/* Re-read media state after a link change; the result is only logged. */
static int dm9601_link_reset(struct usbnet *dev)
{
	struct ethtool_cmd ecmd;

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);

	netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
		   ecmd.speed, ecmd.duplex);

	return 0;
}

static const struct driver_info dm9601_info = {
	.description	= "Davicom DM9601 USB Ethernet",
	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
	.bind		= dm9601_bind,
	.rx_fixup	= dm9601_rx_fixup,
	.tx_fixup	= dm9601_tx_fixup,
	.status		= dm9601_status,
	.link_reset	= dm9601_link_reset,
	.reset		= dm9601_link_reset,
};

/* All devices below share the same DM9601-compatible core. */
static const struct usb_device_id products[] = {
	{
	 USB_DEVICE(0x07aa, 0x9601),	/* Corega FEther USB-TXC */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a46, 0x9601),	/* Davicom USB-100 */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a46, 0x6688),	/* ZT6688 USB NIC */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a46, 0x0268),	/* ShanTou ST268 USB NIC */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a46, 0x8515),	/* ADMtek ADM8515 USB NIC */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a47, 0x9601),	/* Hirose USB-100 */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0fe6, 0x8101),	/* DM9601 USB to Fast Ethernet Adapter */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0fe6, 0x9700),	/* DM9601 USB to Fast Ethernet Adapter */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{
	 USB_DEVICE(0x0a46, 0x9000),	/* DM9000E */
	 .driver_info = (unsigned long)&dm9601_info,
	 },
	{},			// END
};

MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver dm9601_driver = {
	.name = "dm9601",
	.id_table = products,
	.probe = usbnet_probe,
	.disconnect = usbnet_disconnect,
	.suspend = usbnet_suspend,
	.resume = usbnet_resume,
};

/* Module entry: register the USB driver with the core. */
static int __init dm9601_init(void)
{
	return usb_register(&dm9601_driver);
}

/* Module exit: unregister the USB driver. */
static void __exit dm9601_exit(void)
{
	usb_deregister(&dm9601_driver);
}

module_init(dm9601_init);
module_exit(dm9601_exit);

MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
MODULE_LICENSE("GPL");
gpl-2.0
fergy/optimus-l3_e400_kernel
drivers/scsi/3w-9xxx.c
516
78071
/* 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. Written By: Adam Radford <linuxraid@lsi.com> Modifications By: Tom Couch <linuxraid@lsi.com> Copyright (C) 2004-2009 Applied Micro Circuits Corporation. Copyright (C) 2010 LSI Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. NO WARRANTY THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. 
DISCLAIMER OF LIABILITY NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Bugs/Comments/Suggestions should be mailed to: linuxraid@lsi.com For more information, goto: http://www.lsi.com Note: This version of the driver does not contain a bundled firmware image. History ------- 2.26.02.000 - Driver cleanup for kernel submission. 2.26.02.001 - Replace schedule_timeout() calls with msleep(). 2.26.02.002 - Add support for PAE mode. Add lun support. Fix twa_remove() to free irq handler/unregister_chrdev() before shutting down card. Change to new 'change_queue_depth' api. Fix 'handled=1' ISR usage, remove bogus IRQ check. Remove un-needed eh_abort handler. Add support for embedded firmware error strings. 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 2.26.02.004 - Add support for 9550SX controllers. 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher. 2.26.02.006 - Fix 9550SX pchip reset timeout. Add big endian support. 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic(). 2.26.02.008 - Free irq handler in __twa_shutdown(). Serialize reset code. Add support for 9650SE controllers. 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails. 2.26.02.010 - Add support for 9690SA controllers. 2.26.02.011 - Increase max AENs drained to 256. Add MSI support and "use_msi" module parameter. 
Fix bug in twa_get_param() on 4GB+. Use pci_resource_len() for ioremap(). 2.26.02.012 - Add power management support. 2.26.02.013 - Fix bug in twa_load_sgl(). 2.26.02.014 - Force 60 second timeout default. */ #include <linux/module.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_cmnd.h> #include "3w-9xxx.h" /* Globals */ #define TW_DRIVER_VERSION "2.26.02.014" static DEFINE_MUTEX(twa_chrdev_mutex); static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; static unsigned int twa_device_extension_count; static int twa_major = -1; extern struct timezone sys_tz; /* Module parameters */ MODULE_AUTHOR ("LSI"); MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(TW_DRIVER_VERSION); static int use_msi = 0; module_param(use_msi, int, S_IRUGO); MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. 
Default: 0"); /* Function prototypes */ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); static char *twa_aen_severity_lookup(unsigned char severity_code); static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int twa_chrdev_open(struct inode *inode, struct file *file); static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id); static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, u32 set_features, unsigned short current_fw_srl, unsigned short current_fw_arch_id, unsigned short current_fw_branch, unsigned short current_fw_build, unsigned short *fw_on_ctlr_srl, unsigned short *fw_on_ctlr_arch_id, unsigned short *fw_on_ctlr_branch, unsigned short *fw_on_ctlr_build, u32 *init_connect_result); static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length); static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds); static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds); static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); static int twa_reset_device_extension(TW_Device_Extension *tw_dev); static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, 
unsigned int aen_code);
/* NOTE(review): the line above completes the twa_string_lookup() prototype
 * that begins outside this span. */
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);

/* Functions */

/* Show some statistics about the card: sysfs 'stats' attribute, rendered
 * under the host lock so the counters are read consistently. */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* This function will set a devices queue depth, capped at TW_Q_LENGTH-2
 * (two slots are reserved for internal commands). */
static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
				  int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > TW_Q_LENGTH-2)
		queue_depth = TW_Q_LENGTH-2;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
} /* End twa_change_queue_depth() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name =		"stats",
		.mode =		S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= twa_chrdev_ioctl,
	.open		= twa_chrdev_open,
	.release	= NULL,
	.llseek		= noop_llseek,
};

/* This function will complete an aen request from the isr.
 * Returns 0 when another read of the queue has been posted (the attention
 * loop is still running) and 1 when the loop terminates (request freed and
 * TW_IN_ATTENTION_LOOP cleared, or a re-post failed). */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */

/* This function will drain aen queue synchronously (used at init/reset
 * time): repeatedly issues REQUEST SENSE and polls until the firmware
 * reports TW_AEN_QUEUE_EMPTY or TW_MAX_AEN_DRAIN events were read.
 * no_check_reset suppresses the expectation of an initial soft-reset AEN. */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event into the circular event_queue,
 * time-stamping it with local time and flagging clobber when an
 * unretrieved slot is overwritten. */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	struct timeval time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));
	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	do_gettimeofday(&time);
	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
	/* NOTE(review): error_str is located via strlen() BEFORE the
	 * terminator is forced on the next line -- this assumes the
	 * firmware always NUL-terminates err_specific_desc; verify. */
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc,
	       event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT,
		       aen, error_str[0] == '\0' ?
		       twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	/* advance the circular index; mark wrap so readers know old
	   entries may exist across the whole ring */
	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr: posts one
 * REQUEST SENSE as an internal command; completion is handled later by
 * twa_aen_complete().  Returns 0 on successful post, 1 on failure. */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string; NULL for codes
 * outside the [ERROR, DEBUG] range. */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time: builds a
 * SET_PARAM command writing SchedulerTime (seconds since last Sunday
 * 00:00 local time) into the controller timekeeping table and posts it
 * as an internal command. */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */

/* This function will allocate memory and check if it is correctly aligned:
 * one coherent DMA region carved into TW_Q_LENGTH slots of 'size' bytes,
 * stored either as command packets (which == 0) or generic buffers
 * (which == 1).  Returns 0 on success, 1 on failure. */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits:
 * all expected bits must be set and no unexpected bits may be. */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */

/* This function will check the srl and decide if we are compatible:
 * tries the current driver SRL first, then falls back to base-mode
 * compatibility, filling tw_compat_info either way. */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device.
 * NOTE(review): the body continues past the end of this chunk; only the
 * local declarations are visible here. */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	struct timeval
current_time; u32 current_time_ms; TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)]; int retval = TW_IOCTL_ERROR_OS_EFAULT; void __user *argp = (void __user *)arg; mutex_lock(&twa_chrdev_mutex); /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { retval = TW_IOCTL_ERROR_OS_EINTR; goto out; } /* First copy down the driver command */ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command))) goto out2; /* Check data buffer size */ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) { retval = TW_IOCTL_ERROR_OS_EINVAL; goto out2; } /* Hardware can only do multiple of 512 byte transfers */ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; /* Now allocate ioctl buf memory */ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL); if (!cpu_addr) { retval = TW_IOCTL_ERROR_OS_ENOMEM; goto out2; } tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; /* Now copy down the entire ioctl */ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) goto out3; /* See which ioctl we are doing */ switch (cmd) { case TW_IOCTL_FIRMWARE_PASS_THROUGH: spin_lock_irqsave(tw_dev->host->host_lock, flags); twa_get_request_id(tw_dev, &request_id); /* Flag internal command */ tw_dev->srb[request_id] = NULL; /* Flag chrdev ioctl */ tw_dev->chrdev_request_id = request_id; full_command_packet = &tw_ioctl->firmware_command; /* Load request id and sglist for both command types */ twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); /* Now post the command packet to the controller */ twa_post_command_packet(tw_dev, request_id, 1); spin_unlock_irqrestore(tw_dev->host->host_lock, flags); timeout = 
TW_IOCTL_CHRDEV_TIMEOUT*HZ; /* Now wait for command to complete */ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); /* We timed out, and didn't get an interrupt */ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) { /* Now we need to reset the board */ printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", tw_dev->host->host_no, TW_DRIVER, 0x37, cmd); retval = TW_IOCTL_ERROR_OS_EIO; twa_reset_device_extension(tw_dev); goto out3; } /* Now copy in the command packet response */ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); /* Now complete the io */ spin_lock_irqsave(tw_dev->host->host_lock, flags); tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); spin_unlock_irqrestore(tw_dev->host->host_lock, flags); break; case TW_IOCTL_GET_COMPATIBILITY_INFO: tw_ioctl->driver_command.status = 0; /* Copy compatibility struct into ioctl data buffer */ tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info)); break; case TW_IOCTL_GET_LAST_EVENT: if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } else tw_ioctl->driver_command.status = 0; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } tw_ioctl->driver_command.status = 0; } event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH; memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_FIRST_EVENT: if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = 
TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } else tw_ioctl->driver_command.status = 0; event_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } tw_ioctl->driver_command.status = 0; event_index = 0; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_NEXT_EVENT: event = (TW_Event *)tw_ioctl->data_buffer; sequence_id = event->sequence_id; tw_ioctl->driver_command.status = 0; if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } start_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } start_index = 0; } event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH; if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) { if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) tw_dev->aen_clobber = 1; tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_PREVIOUS_EVENT: event = (TW_Event *)tw_ioctl->data_buffer; sequence_id = event->sequence_id; tw_ioctl->driver_command.status = 0; if (tw_dev->event_queue_wrapped) { if (tw_dev->aen_clobber) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; tw_dev->aen_clobber = 0; } start_index = tw_dev->error_index; } else { if (!tw_dev->error_index) { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } start_index = 0; } event_index = (start_index + sequence_id - 
tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH; if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) { if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) tw_dev->aen_clobber = 1; tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; break; } memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; break; case TW_IOCTL_GET_LOCK: tw_lock = (TW_Lock *)tw_ioctl->data_buffer; do_gettimeofday(&current_time); current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000); if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) { tw_dev->ioctl_sem_lock = 1; tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec; tw_ioctl->driver_command.status = 0; tw_lock->time_remaining_msec = tw_lock->timeout_msec; } else { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED; tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms; } break; case TW_IOCTL_RELEASE_LOCK: if (tw_dev->ioctl_sem_lock == 1) { tw_dev->ioctl_sem_lock = 0; tw_ioctl->driver_command.status = 0; } else { tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED; } break; default: retval = TW_IOCTL_ERROR_OS_ENOTTY; goto out3; } /* Now copy the entire response to userspace */ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) retval = 0; out3: /* Now free ioctl buf memory */ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); out2: mutex_unlock(&tw_dev->ioctl_lock); out: mutex_unlock(&twa_chrdev_mutex); return retval; } /* End twa_chrdev_ioctl() */ /* This function handles open for the character device */ /* NOTE that this function will race with remove. 
*/ static int twa_chrdev_open(struct inode *inode, struct file *file) { unsigned int minor_number; int retval = TW_IOCTL_ERROR_OS_ENODEV; minor_number = iminor(inode); if (minor_number >= twa_device_extension_count) goto out; retval = 0; out: return retval; } /* End twa_chrdev_open() */ /* This function will print readable messages from status register errors */ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) { int retval = 1; /* Check for various error conditions and handle them appropriately */ if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing"); writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); } if (status_reg_value & TW_STATUS_PCI_ABORT) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing"); writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); } if (status_reg_value & TW_STATUS_QUEUE_ERROR) { if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) && (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) || (!test_bit(TW_IN_RESET, &tw_dev->flags))) TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing"); writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); } if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { if (tw_dev->reset_print == 0) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); tw_dev->reset_print = 1; } goto out; } retval = 0; out: return retval; } /* End twa_decode_bits() */ /* This function will empty the response queue */ static int twa_empty_response_queue(TW_Device_Extension *tw_dev) { u32 status_reg_value, response_que_value; int count = 0, retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) { response_que_value = 
readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); count++; } if (count == TW_MAX_RESPONSE_DRAIN) goto out; retval = 0; out: return retval; } /* End twa_empty_response_queue() */ /* This function will clear the pchip/response queue on 9550SX */ static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev) { u32 response_que_value = 0; unsigned long before; int retval = 1; if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) { before = jiffies; while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) { response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev)); msleep(1); if (time_after(jiffies, before + HZ * 30)) goto out; } /* P-chip settle time */ msleep(500); retval = 0; } else retval = 0; out: return retval; } /* End twa_empty_response_queue_large() */ /* This function passes sense keys from firmware to scsi layer */ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) { TW_Command_Full *full_command_packet; unsigned short error; int retval = 1; char *error_str; full_command_packet = tw_dev->command_packet_virt[request_id]; /* Check for embedded error string */ error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]); /* Don't print error for Logical unit not supported during rollcall */ error = le16_to_cpu(full_command_packet->header.status_block.error); if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) { if (print_host) printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", tw_dev->host->host_no, TW_MESSAGE_SOURCE_CONTROLLER_ERROR, full_command_packet->header.status_block.error, error_str[0] == '\0' ? 
twa_string_lookup(twa_error_table, full_command_packet->header.status_block.error) : error_str, full_command_packet->header.err_specific_desc); else printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n", TW_MESSAGE_SOURCE_CONTROLLER_ERROR, full_command_packet->header.status_block.error, error_str[0] == '\0' ? twa_string_lookup(twa_error_table, full_command_packet->header.status_block.error) : error_str, full_command_packet->header.err_specific_desc); } if (copy_sense) { memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH); tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); retval = TW_ISR_DONT_RESULT; goto out; } retval = 0; out: return retval; } /* End twa_fill_sense() */ /* This function will free up device extension resources */ static void twa_free_device_extension(TW_Device_Extension *tw_dev) { if (tw_dev->command_packet_virt[0]) pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command_Full)*TW_Q_LENGTH, tw_dev->command_packet_virt[0], tw_dev->command_packet_phys[0]); if (tw_dev->generic_buffer_virt[0]) pci_free_consistent(tw_dev->tw_pci_dev, TW_SECTOR_SIZE*TW_Q_LENGTH, tw_dev->generic_buffer_virt[0], tw_dev->generic_buffer_phys[0]); kfree(tw_dev->event_queue[0]); } /* End twa_free_device_extension() */ /* This function will free a request id */ static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) { tw_dev->free_queue[tw_dev->free_tail] = request_id; tw_dev->state[request_id] = TW_S_FINISHED; tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; } /* End twa_free_request_id() */ /* This function will get parameter table entries from the firmware */ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) { TW_Command_Full *full_command_packet; TW_Command *command_packet; TW_Param_Apache *param; void *retval = NULL; /* Setup the command packet */ 
full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); command_packet = &full_command_packet->command.oldcommand; command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); command_packet->size = TW_COMMAND_SIZE; command_packet->request_id = request_id; command_packet->byte6_offset.block_count = cpu_to_le16(1); /* Now setup the param */ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; memset(param, 0, TW_SECTOR_SIZE); param->table_id = cpu_to_le16(table_id | 0x8000); param->parameter_id = cpu_to_le16(parameter_id); param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); /* Post the command packet to the board */ twa_post_command_packet(tw_dev, request_id, 1); /* Poll for completion */ if (twa_poll_response(tw_dev, request_id, 30)) TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param") else retval = (void *)&(param->data[0]); tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_INITIAL; return retval; } /* End twa_get_param() */ /* This function will assign an available request id */ static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id) { *request_id = tw_dev->free_queue[tw_dev->free_head]; tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; tw_dev->state[*request_id] = TW_S_STARTED; } /* End twa_get_request_id() */ /* This function will send an initconnection command to controller */ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, u32 set_features, unsigned short current_fw_srl, unsigned short current_fw_arch_id, unsigned short current_fw_branch, unsigned short current_fw_build, unsigned short *fw_on_ctlr_srl, unsigned short *fw_on_ctlr_arch_id, unsigned short *fw_on_ctlr_branch, 
unsigned short *fw_on_ctlr_build, u32 *init_connect_result) { TW_Command_Full *full_command_packet; TW_Initconnect *tw_initconnect; int request_id = 0, retval = 1; /* Initialize InitConnection command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; memset(full_command_packet, 0, sizeof(TW_Command_Full)); full_command_packet->header.header_desc.size_header = 128; tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); tw_initconnect->request_id = request_id; tw_initconnect->message_credits = cpu_to_le16(message_credits); tw_initconnect->features = set_features; /* Turn on 64-bit sgl support if we need to */ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0; tw_initconnect->features = cpu_to_le32(tw_initconnect->features); if (set_features & TW_EXTENDED_INIT_CONNECT) { tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl); tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id); tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch); tw_initconnect->fw_build = cpu_to_le16(current_fw_build); } else tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; /* Send command packet to the board */ twa_post_command_packet(tw_dev, request_id, 1); /* Poll for completion */ if (twa_poll_response(tw_dev, request_id, 30)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection"); } else { if (set_features & TW_EXTENDED_INIT_CONNECT) { *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl); *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id); *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch); *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build); *init_connect_result = le32_to_cpu(tw_initconnect->result); } retval = 0; } tw_dev->posted_request_count--; tw_dev->state[request_id] = TW_S_INITIAL; return retval; } /* End twa_initconnection() 
*/ /* This function will initialize the fields of a device extension */ static int twa_initialize_device_extension(TW_Device_Extension *tw_dev) { int i, retval = 1; /* Initialize command packet buffers */ if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed"); goto out; } /* Initialize generic buffer */ if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed"); goto out; } /* Allocate event info space */ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL); if (!tw_dev->event_queue[0]) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed"); goto out; } for (i = 0; i < TW_Q_LENGTH; i++) { tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); tw_dev->free_queue[i] = i; tw_dev->state[i] = TW_S_INITIAL; } tw_dev->pending_head = TW_Q_START; tw_dev->pending_tail = TW_Q_START; tw_dev->free_head = TW_Q_START; tw_dev->free_tail = TW_Q_START; tw_dev->error_sequence_id = 1; tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; mutex_init(&tw_dev->ioctl_lock); init_waitqueue_head(&tw_dev->ioctl_wqueue); retval = 0; out: return retval; } /* End twa_initialize_device_extension() */ /* This function is the interrupt service routine */ static irqreturn_t twa_interrupt(int irq, void *dev_instance) { int request_id, error = 0; u32 status_reg_value; TW_Response_Queue response_que; TW_Command_Full *full_command_packet; TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; int handled = 0; /* Get the per adapter lock */ spin_lock(tw_dev->host->host_lock); /* Read the registers */ status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); /* Check if this is our interrupt, otherwise bail */ if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) goto twa_interrupt_bail; handled = 1; /* If we are resetting, bail */ if 
	   (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt (asynchronous event notification) */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion (srb == NULL
			   means AEN work or a chrdev passthrough ioctl) */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					/* Wake the sleeping ioctl waiter */
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				/* Now complete the io */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
				twa_unmap_scsi_data(tw_dev, request_id);
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if
(twa_decode_bits(tw_dev, status_reg_value)) { TW_CLEAR_ALL_INTERRUPTS(tw_dev); goto twa_interrupt_bail; } } } } twa_interrupt_bail: spin_unlock(tw_dev->host->host_lock); return IRQ_RETVAL(handled); } /* End twa_interrupt() */ /* This function will load the request id and various sgls for ioctls */ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) { TW_Command *oldcommand; TW_Command_Apache *newcommand; TW_SG_Entry *sgl; unsigned int pae = 0; if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4)) pae = 1; if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { newcommand = &full_command_packet->command.newcommand; newcommand->request_id__lunl = cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id)); if (length) { newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); newcommand->sg_list[0].length = cpu_to_le32(length); } newcommand->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 
1 : 0)); } else { oldcommand = &full_command_packet->command.oldcommand; oldcommand->request_id = request_id; if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { /* Load the sg list */ if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA) sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae); else sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset)); sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1); sgl->length = cpu_to_le32(length); oldcommand->size += pae; } } } /* End twa_load_sgl() */ /* This function will perform a pci-dma mapping for a scatter gather list */ static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) { int use_sg; struct scsi_cmnd *cmd = tw_dev->srb[request_id]; use_sg = scsi_dma_map(cmd); if (!use_sg) return 0; else if (use_sg < 0) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); return 0; } cmd->SCp.phase = TW_PHASE_SGLIST; cmd->SCp.have_data_in = use_sg; return use_sg; } /* End twa_map_scsi_sg_data() */ /* This function will poll for a response interrupt of a request */ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) { int retval = 1, found = 0, response_request_id; TW_Response_Queue response_queue; TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id]; if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) { response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); response_request_id = TW_RESID_OUT(response_queue.response_id); if (request_id != response_request_id) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response"); goto out; } if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { if (full_command_packet->command.newcommand.status != 0) { /* bad response */ twa_fill_sense(tw_dev, request_id, 0, 0); goto out; 
} found = 1; } else { if (full_command_packet->command.oldcommand.status != 0) { /* bad response */ twa_fill_sense(tw_dev, request_id, 0, 0); goto out; } found = 1; } } if (found) retval = 0; out: return retval; } /* End twa_poll_response() */ /* This function will poll the status register for a flag */ static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) { u32 status_reg_value; unsigned long before; int retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); before = jiffies; if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); while ((status_reg_value & flag) != flag) { status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (time_after(jiffies, before + HZ * seconds)) goto out; msleep(50); } retval = 0; out: return retval; } /* End twa_poll_status() */ /* This function will poll the status register for disappearance of a flag */ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) { u32 status_reg_value; unsigned long before; int retval = 1; status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); before = jiffies; if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); while ((status_reg_value & flag) != 0) { status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (time_after(jiffies, before + HZ * seconds)) goto out; msleep(50); } retval = 0; out: return retval; } /* End twa_poll_status_gone() */ /* This function will attempt to post a command packet to the board */ static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal) { u32 status_reg_value; dma_addr_t command_que_value; int retval = 1; command_que_value = tw_dev->command_packet_phys[request_id]; /* For 9650SE write low 4 bytes first */ if ((tw_dev->tw_pci_dev->device == 
PCI_DEVICE_ID_3WARE_9650SE) || (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) { command_que_value += TW_COMMAND_OFFSET; writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev)); } status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); if (twa_check_bits(status_reg_value)) twa_decode_bits(tw_dev, status_reg_value); if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) { /* Only pend internal driver commands */ if (!internal) { retval = SCSI_MLQUEUE_HOST_BUSY; goto out; } /* Couldn't post the command packet, so we do it later */ if (tw_dev->state[request_id] != TW_S_PENDING) { tw_dev->state[request_id] = TW_S_PENDING; tw_dev->pending_request_count++; if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { tw_dev->max_pending_request_count = tw_dev->pending_request_count; } tw_dev->pending_queue[tw_dev->pending_tail] = request_id; tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH; } TW_UNMASK_COMMAND_INTERRUPT(tw_dev); goto out; } else { if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) || (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) { /* Now write upper 4 bytes */ writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4); } else { if (sizeof(dma_addr_t) > 4) { command_que_value += TW_COMMAND_OFFSET; writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4); } else { writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); } } tw_dev->state[request_id] = TW_S_POSTED; tw_dev->posted_request_count++; if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { tw_dev->max_posted_request_count = tw_dev->posted_request_count; } } retval = 0; out: return retval; } /* End twa_post_command_packet() */ /* This function will reset a device 
extension */ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) { int i = 0; int retval = 1; unsigned long flags = 0; set_bit(TW_IN_RESET, &tw_dev->flags); TW_DISABLE_INTERRUPTS(tw_dev); TW_MASK_COMMAND_INTERRUPT(tw_dev); spin_lock_irqsave(tw_dev->host->host_lock, flags); /* Abort all requests that are in progress */ for (i = 0; i < TW_Q_LENGTH; i++) { if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { if (tw_dev->srb[i]) { tw_dev->srb[i]->result = (DID_RESET << 16); tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); twa_unmap_scsi_data(tw_dev, i); } } } /* Reset queues and counts */ for (i = 0; i < TW_Q_LENGTH; i++) { tw_dev->free_queue[i] = i; tw_dev->state[i] = TW_S_INITIAL; } tw_dev->free_head = TW_Q_START; tw_dev->free_tail = TW_Q_START; tw_dev->posted_request_count = 0; tw_dev->pending_request_count = 0; tw_dev->pending_head = TW_Q_START; tw_dev->pending_tail = TW_Q_START; tw_dev->reset_print = 0; spin_unlock_irqrestore(tw_dev->host->host_lock, flags); if (twa_reset_sequence(tw_dev, 1)) goto out; TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); clear_bit(TW_IN_RESET, &tw_dev->flags); tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; retval = 0; out: return retval; } /* End twa_reset_device_extension() */ /* This function will reset a controller */ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) { int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; while (tries < TW_MAX_RESET_TRIES) { if (do_soft_reset) { TW_SOFT_RESET(tw_dev); /* Clear pchip/response queue on 9550SX */ if (twa_empty_response_queue_large(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence"); do_soft_reset = 1; tries++; continue; } } /* Make sure controller is in a good state */ if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? 
TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence"); do_soft_reset = 1; tries++; continue; } /* Empty response queue */ if (twa_empty_response_queue(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence"); do_soft_reset = 1; tries++; continue; } flashed = 0; /* Check for compatibility/flash */ if (twa_check_srl(tw_dev, &flashed)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence"); do_soft_reset = 1; tries++; continue; } else { if (flashed) { tries++; continue; } } /* Drain the AEN queue */ if (twa_aen_drain_queue(tw_dev, soft_reset)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence"); do_soft_reset = 1; tries++; continue; } /* If we got here, controller is in a good state */ retval = 0; goto out; } out: return retval; } /* End twa_reset_sequence() */ /* This funciton returns unit geometry in cylinders/heads/sectors */ static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { int heads, sectors, cylinders; TW_Device_Extension *tw_dev; tw_dev = (TW_Device_Extension *)sdev->host->hostdata; if (capacity >= 0x200000) { heads = 255; sectors = 63; cylinders = sector_div(capacity, heads * sectors); } else { heads = 64; sectors = 32; cylinders = sector_div(capacity, heads * sectors); } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; return 0; } /* End twa_scsi_biosparam() */ /* This is the new scsi eh reset function */ static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt) { TW_Device_Extension *tw_dev = NULL; int retval = FAILED; tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; tw_dev->num_resets++; sdev_printk(KERN_WARNING, SCpnt->device, "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n", TW_DRIVER, 0x2c, SCpnt->cmnd[0]); /* Make sure we are not issuing an ioctl or 
resetting from ioctl */ mutex_lock(&tw_dev->ioctl_lock); /* Now reset the card and some of the device extension data */ if (twa_reset_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset"); goto out; } retval = SUCCESS; out: mutex_unlock(&tw_dev->ioctl_lock); return retval; } /* End twa_scsi_eh_reset() */ /* This is the main scsi queue function to handle scsi opcodes */ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { int request_id, retval; TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; /* If we are resetting due to timed out ioctl, report as busy */ if (test_bit(TW_IN_RESET, &tw_dev->flags)) { retval = SCSI_MLQUEUE_HOST_BUSY; goto out; } /* Check if this FW supports luns */ if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) { SCpnt->result = (DID_BAD_TARGET << 16); done(SCpnt); retval = 0; goto out; } /* Save done function into scsi_cmnd struct */ SCpnt->scsi_done = done; /* Get a free request id */ twa_get_request_id(tw_dev, &request_id); /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; /* Initialize phase to zero */ SCpnt->SCp.phase = TW_PHASE_INITIAL; retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: twa_free_request_id(tw_dev, request_id); twa_unmap_scsi_data(tw_dev, request_id); break; case 1: tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); done(SCpnt); retval = 0; } out: return retval; } /* End twa_scsi_queue() */ static DEF_SCSI_QCMD(twa_scsi_queue) /* This function hands scsi cdb's to the firmware */ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) { TW_Command_Full 
*full_command_packet; TW_Command_Apache *command_packet; u32 num_sectors = 0x0; int i, sg_count; struct scsi_cmnd *srb = NULL; struct scatterlist *sglist = NULL, *sg; int retval = 1; if (tw_dev->srb[request_id]) { srb = tw_dev->srb[request_id]; if (scsi_sglist(srb)) sglist = scsi_sglist(srb); } /* Initialize command packet */ full_command_packet = tw_dev->command_packet_virt[request_id]; full_command_packet->header.header_desc.size_header = 128; full_command_packet->header.status_block.error = 0; full_command_packet->header.status_block.severity__reserved = 0; command_packet = &full_command_packet->command.newcommand; command_packet->status = 0; command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); /* We forced 16 byte cdb use earlier */ if (!cdb) memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); else memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); if (srb) { command_packet->unit = srb->device->id; command_packet->request_id__lunl = cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id)); } else { command_packet->request_id__lunl = cpu_to_le16(TW_REQ_LUN_IN(0, request_id)); command_packet->unit = 0; } command_packet->sgl_offset = 16; if (!sglistarg) { /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { if ((scsi_sg_count(srb) == 1) && (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, tw_dev->generic_buffer_virt[request_id], TW_SECTOR_SIZE); command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); } else { sg_count = twa_map_scsi_sg_data(tw_dev, request_id); if (sg_count == 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg)); command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); if (command_packet->sg_list[i].address & 
TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi"); goto out; } } } command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]))); } } else { /* Internal cdb post */ for (i = 0; i < use_sg; i++) { command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address); command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length); if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post"); goto out; } } command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg)); } if (srb) { if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) num_sectors = (u32)srb->cmnd[4]; if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); } /* Update sector statistic */ tw_dev->sector_count = num_sectors; if (tw_dev->sector_count > tw_dev->max_sector_count) tw_dev->max_sector_count = tw_dev->sector_count; /* Update SG statistics */ if (srb) { tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]); if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) tw_dev->max_sgl_entries = tw_dev->sgl_entries; } /* Now post the command to the board */ if (srb) { retval = twa_post_command_packet(tw_dev, request_id, 0); } else { twa_post_command_packet(tw_dev, request_id, 1); retval = 0; } out: return retval; } /* End twa_scsiop_execute_scsi() */ /* This function completes an execute scsi operation */ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { void *buf = tw_dev->generic_buffer_virt[request_id]; 
scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE); } } } /* End twa_scsiop_execute_scsi_complete() */ /* This function tells the controller to shut down */ static void __twa_shutdown(TW_Device_Extension *tw_dev) { /* Disable interrupts */ TW_DISABLE_INTERRUPTS(tw_dev); /* Free up the IRQ */ free_irq(tw_dev->tw_pci_dev->irq, tw_dev); printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no); /* Tell the card we are shutting down */ if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed"); } else { printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n"); } /* Clear all interrupts just before exit */ TW_CLEAR_ALL_INTERRUPTS(tw_dev); } /* End __twa_shutdown() */ /* Wrapper for __twa_shutdown */ static void twa_shutdown(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; __twa_shutdown(tw_dev); } /* End twa_shutdown() */ /* This function will look up a string */ static char *twa_string_lookup(twa_message_type *table, unsigned int code) { int index; for (index = 0; ((code != table[index].code) && (table[index].text != (char *)0)); index++); return(table[index].text); } /* End twa_string_lookup() */ /* This function will perform a pci-dma unmap */ static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; if (cmd->SCp.phase == TW_PHASE_SGLIST) scsi_dma_unmap(cmd); } /* End twa_unmap_scsi_data() */ /* This function gets called when a disk is coming on-line */ static int twa_slave_configure(struct scsi_device *sdev) { /* Force 60 second timeout */ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); return 0; } /* End twa_slave_configure() */ /* scsi_host_template initializer */ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "3ware 9000 Storage Controller", 
.queuecommand = twa_scsi_queue, .eh_host_reset_handler = twa_scsi_eh_reset, .bios_param = twa_scsi_biosparam, .change_queue_depth = twa_change_queue_depth, .can_queue = TW_Q_LENGTH-2, .slave_configure = twa_slave_configure, .this_id = -1, .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, .max_sectors = TW_MAX_SECTORS, .cmd_per_lun = TW_MAX_CMDS_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = twa_host_attrs, .emulated = 1 }; /* This function will probe and initialize a card */ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) { struct Scsi_Host *host = NULL; TW_Device_Extension *tw_dev; unsigned long mem_addr, mem_len; int retval = -ENODEV; retval = pci_enable_device(pdev); if (retval) { TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); goto out_disable_device; } pci_set_master(pdev); pci_try_set_mwi(pdev); if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; } host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); if (!host) { TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension"); retval = -ENOMEM; goto out_disable_device; } tw_dev = (TW_Device_Extension *)host->hostdata; /* Save values to device extension */ tw_dev->host = host; tw_dev->tw_pci_dev = pdev; if (twa_initialize_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); goto out_free_device_extension; } /* Request IO regions */ retval = pci_request_regions(pdev, "3w-9xxx"); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region"); goto out_free_device_extension; } if (pdev->device == PCI_DEVICE_ID_3WARE_9000) { mem_addr = pci_resource_start(pdev, 1); mem_len = pci_resource_len(pdev, 
1); } else { mem_addr = pci_resource_start(pdev, 2); mem_len = pci_resource_len(pdev, 2); } /* Save base address */ tw_dev->base_addr = ioremap(mem_addr, mem_len); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); goto out_release_mem_region; } /* Disable interrupts on the card */ TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ if (twa_reset_sequence(tw_dev, 0)) goto out_iounmap; /* Set host specific parameters */ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || (pdev->device == PCI_DEVICE_ID_3WARE_9690SA)) host->max_id = TW_MAX_UNITS_9650SE; else host->max_id = TW_MAX_UNITS; host->max_cmd_len = TW_MAX_CDB_LEN; /* Channels aren't supported by adapter */ host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl); host->max_channel = 0; /* Register the card with the kernel SCSI layer */ retval = scsi_add_host(host, &pdev->dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed"); goto out_iounmap; } pci_set_drvdata(pdev, host); printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n", host->host_no, mem_addr, pdev->irq); printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", host->host_no, (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE, TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE, TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH))); /* Try to enable MSI */ if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) && !pci_enable_msi(pdev)) set_bit(TW_USING_MSI, &tw_dev->flags); /* Now setup the interrupt handler */ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ"); goto out_remove_host; } twa_device_extension_list[twa_device_extension_count] = tw_dev; 
twa_device_extension_count++; /* Re-enable interrupts on the card */ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); /* Finally, scan the host */ scsi_scan_host(host); if (twa_major == -1) { if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0) TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device"); } return 0; out_remove_host: if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); scsi_remove_host(host); out_iounmap: iounmap(tw_dev->base_addr); out_release_mem_region: pci_release_regions(pdev); out_free_device_extension: twa_free_device_extension(tw_dev); scsi_host_put(host); out_disable_device: pci_disable_device(pdev); return retval; } /* End twa_probe() */ /* This function is called to remove a device */ static void twa_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; scsi_remove_host(tw_dev->host); /* Unregister character device */ if (twa_major >= 0) { unregister_chrdev(twa_major, "twa"); twa_major = -1; } /* Shutdown the card */ __twa_shutdown(tw_dev); /* Disable MSI if enabled */ if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); /* Free IO remapping */ iounmap(tw_dev->base_addr); /* Free up the mem region */ pci_release_regions(pdev); /* Free up device extension resources */ twa_free_device_extension(tw_dev); scsi_host_put(tw_dev->host); pci_disable_device(pdev); twa_device_extension_count--; } /* End twa_remove() */ #ifdef CONFIG_PM /* This function is called on PCI suspend */ static int twa_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no); TW_DISABLE_INTERRUPTS(tw_dev); free_irq(tw_dev->tw_pci_dev->irq, tw_dev); if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_disable_msi(pdev); /* Tell the card we are shutting down */ if 
(twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend"); } else { printk(KERN_WARNING "3w-9xxx: Suspend complete.\n"); } TW_CLEAR_ALL_INTERRUPTS(tw_dev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /* End twa_suspend() */ /* This function is called on PCI resume */ static int twa_resume(struct pci_dev *pdev) { int retval = 0; struct Scsi_Host *host = pci_get_drvdata(pdev); TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no); pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); retval = pci_enable_device(pdev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume"); return retval; } pci_set_master(pdev); pci_try_set_mwi(pdev); if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; } /* Initialize the card */ if (twa_reset_sequence(tw_dev, 0)) { retval = -ENODEV; goto out_disable_device; } /* Now setup the interrupt handler */ retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); if (retval) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume"); retval = -ENODEV; goto out_disable_device; } /* Now enable MSI if enabled */ if (test_bit(TW_USING_MSI, &tw_dev->flags)) pci_enable_msi(pdev); /* Re-enable interrupts on the card */ TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); printk(KERN_WARNING "3w-9xxx: Resume complete.\n"); return 0; out_disable_device: scsi_remove_host(host); pci_disable_device(pdev); return retval; 
} /* End twa_resume() */ #endif /* PCI Devices supported by this driver */ static struct pci_device_id twa_pci_tbl[] = { { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } }; MODULE_DEVICE_TABLE(pci, twa_pci_tbl); /* pci_driver initializer */ static struct pci_driver twa_driver = { .name = "3w-9xxx", .id_table = twa_pci_tbl, .probe = twa_probe, .remove = twa_remove, #ifdef CONFIG_PM .suspend = twa_suspend, .resume = twa_resume, #endif .shutdown = twa_shutdown }; /* This function is called on driver initialization */ static int __init twa_init(void) { printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION); return pci_register_driver(&twa_driver); } /* End twa_init() */ /* This function is called on driver exit */ static void __exit twa_exit(void) { pci_unregister_driver(&twa_driver); } /* End twa_exit() */ module_init(twa_init); module_exit(twa_exit);
gpl-2.0
ibrahima/kernel_i9300
arch/arm/mach-exynos/dev-pd-exynos5.c
516
2968
/* linux/arch/arm/mach-exynos/dev-pd-exynos5.c
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS5 - Power Domain support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <mach/regs-pmu5.h>
#include <mach/regs-clock.h>

#include <plat/pd.h>

/*
 * Static descriptors for the EXYNOS5 power domains, one "samsung-pd"
 * platform device per domain.  Each entry carries the common pd
 * callbacks plus the domain's configuration register and (optionally)
 * its clock-gate / clock-source registers; PD_ISP has no gate register,
 * hence the NULL clk_base.
 */
struct platform_device exynos5_device_pd[] = {
	[PD_MFC] = {
		.id	= PD_MFC,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_MFC_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					.clk_base	= EXYNOS5_CLKGATE_IP_MFC,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
	[PD_G3D] = {
		.id	= PD_G3D,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_G3D_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					.clk_base	= EXYNOS5_CLKGATE_IP_G3D,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
	[PD_GPS] = {
		.id	= PD_GPS,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_GPS_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					.clk_base	= EXYNOS5_CLKGATE_IP_GPS,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
	[PD_ISP] = {
		.id	= PD_ISP,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_ISP_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					/* ISP has no IP clock-gate register */
					.clk_base	= NULL,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
	[PD_GSCL] = {
		.id	= PD_GSCL,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_GSCL_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					.clk_base	= EXYNOS5_CLKGATE_IP_GSCL,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
	[PD_DISP1] = {
		.id	= PD_DISP1,
		.name	= "samsung-pd",
		.dev	= {
			.platform_data = &(struct samsung_pd_info) {
				.init		= exynos_pd_init,
				.enable		= exynos_pd_enable,
				.disable	= exynos_pd_disable,
				.base		= EXYNOS5_DISP1_CONFIGURATION,
				.data		= &(struct exynos_pd_data) {
					.clk_base	= EXYNOS5_CLKGATE_IP_DISP1,
					.clksrc_base	= EXYNOS5_CLKSRC_TOP3,
				},
			},
		},
	},
};
gpl-2.0
Khaon/android_kernel_samsung_a3xelte
fs/f2fs/checkpoint.c
1540
20322
/* * fs/f2fs/checkpoint.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/bio.h> #include <linux/mpage.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/f2fs_fs.h> #include <linux/pagevec.h> #include <linux/swap.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include <trace/events/f2fs.h> static struct kmem_cache *orphan_entry_slab; static struct kmem_cache *inode_entry_slab; /* * We guarantee no failure on the returned page. */ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = sbi->meta_inode->i_mapping; struct page *page = NULL; repeat: page = grab_cache_page(mapping, index); if (!page) { cond_resched(); goto repeat; } /* We wait writeback only inside grab_meta_page() */ wait_on_page_writeback(page); SetPageUptodate(page); return page; } /* * We guarantee no failure on the returned page. 
*/ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = sbi->meta_inode->i_mapping; struct page *page; repeat: page = grab_cache_page(mapping, index); if (!page) { cond_resched(); goto repeat; } if (PageUptodate(page)) goto out; if (f2fs_readpage(sbi, page, index, READ_SYNC)) goto repeat; lock_page(page); if (page->mapping != mapping) { f2fs_put_page(page, 1); goto repeat; } out: mark_page_accessed(page); return page; } static int f2fs_write_meta_page(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); /* Should not write any meta pages, if any IO error was occurred */ if (wbc->for_reclaim || is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) { dec_page_count(sbi, F2FS_DIRTY_META); wbc->pages_skipped++; set_page_dirty(page); return AOP_WRITEPAGE_ACTIVATE; } wait_on_page_writeback(page); write_meta_page(sbi, page); dec_page_count(sbi, F2FS_DIRTY_META); unlock_page(page); return 0; } static int f2fs_write_meta_pages(struct address_space *mapping, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); struct block_device *bdev = sbi->sb->s_bdev; long written; if (wbc->for_kupdate) return 0; if (get_pages(sbi, F2FS_DIRTY_META) == 0) return 0; /* if mounting is failed, skip writing node pages */ mutex_lock(&sbi->cp_mutex); written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev)); mutex_unlock(&sbi->cp_mutex); wbc->nr_to_write -= written; return 0; } long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, long nr_to_write) { struct address_space *mapping = sbi->meta_inode->i_mapping; pgoff_t index = 0, end = LONG_MAX; struct pagevec pvec; long nwritten = 0; struct writeback_control wbc = { .for_reclaim = 0, }; pagevec_init(&pvec, 0); while (index <= end) { int i, nr_pages; nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, min(end - index, 
(pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; lock_page(page); BUG_ON(page->mapping != mapping); BUG_ON(!PageDirty(page)); clear_page_dirty_for_io(page); if (f2fs_write_meta_page(page, &wbc)) { unlock_page(page); break; } if (nwritten++ >= nr_to_write) break; } pagevec_release(&pvec); cond_resched(); } if (nwritten) f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX); return nwritten; } static int f2fs_set_meta_page_dirty(struct page *page) { struct address_space *mapping = page->mapping; struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb); SetPageUptodate(page); if (!PageDirty(page)) { __set_page_dirty_nobuffers(page); inc_page_count(sbi, F2FS_DIRTY_META); return 1; } return 0; } const struct address_space_operations f2fs_meta_aops = { .writepage = f2fs_write_meta_page, .writepages = f2fs_write_meta_pages, .set_page_dirty = f2fs_set_meta_page_dirty, }; int check_orphan_space(struct f2fs_sb_info *sbi) { unsigned int max_orphans; int err = 0; /* * considering 512 blocks in a segment 5 blocks are needed for cp * and log segment summaries. 
Remaining blocks are used to keep * orphan entries with the limitation one reserved segment * for cp pack we can have max 1020*507 orphan entries */ max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK; mutex_lock(&sbi->orphan_inode_mutex); if (sbi->n_orphans >= max_orphans) err = -ENOSPC; mutex_unlock(&sbi->orphan_inode_mutex); return err; } void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) { struct list_head *head, *this; struct orphan_inode_entry *new = NULL, *orphan = NULL; mutex_lock(&sbi->orphan_inode_mutex); head = &sbi->orphan_inode_list; list_for_each(this, head) { orphan = list_entry(this, struct orphan_inode_entry, list); if (orphan->ino == ino) goto out; if (orphan->ino > ino) break; orphan = NULL; } retry: new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC); if (!new) { cond_resched(); goto retry; } new->ino = ino; /* add new_oentry into list which is sorted by inode number */ if (orphan) list_add(&new->list, this->prev); else list_add_tail(&new->list, head); sbi->n_orphans++; out: mutex_unlock(&sbi->orphan_inode_mutex); } void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) { struct list_head *this, *next, *head; struct orphan_inode_entry *orphan; mutex_lock(&sbi->orphan_inode_mutex); head = &sbi->orphan_inode_list; list_for_each_safe(this, next, head) { orphan = list_entry(this, struct orphan_inode_entry, list); if (orphan->ino == ino) { list_del(&orphan->list); kmem_cache_free(orphan_entry_slab, orphan); sbi->n_orphans--; break; } } mutex_unlock(&sbi->orphan_inode_mutex); } static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) { struct inode *inode = f2fs_iget(sbi->sb, ino); BUG_ON(IS_ERR(inode)); clear_nlink(inode); /* truncate all the data during iput */ iput(inode); } int recover_orphan_inodes(struct f2fs_sb_info *sbi) { block_t start_blk, orphan_blkaddr, i, j; if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) return 0; sbi->por_doing = 1; start_blk = __start_cp_addr(sbi) + 1; 
orphan_blkaddr = __start_sum_addr(sbi) - 1; for (i = 0; i < orphan_blkaddr; i++) { struct page *page = get_meta_page(sbi, start_blk + i); struct f2fs_orphan_block *orphan_blk; orphan_blk = (struct f2fs_orphan_block *)page_address(page); for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { nid_t ino = le32_to_cpu(orphan_blk->ino[j]); recover_orphan_inode(sbi, ino); } f2fs_put_page(page, 1); } /* clear Orphan Flag */ clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); sbi->por_doing = 0; return 0; } static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) { struct list_head *head, *this, *next; struct f2fs_orphan_block *orphan_blk = NULL; struct page *page = NULL; unsigned int nentries = 0; unsigned short index = 1; unsigned short orphan_blocks; orphan_blocks = (unsigned short)((sbi->n_orphans + (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK); mutex_lock(&sbi->orphan_inode_mutex); head = &sbi->orphan_inode_list; /* loop for each orphan inode entry and write them in Jornal block */ list_for_each_safe(this, next, head) { struct orphan_inode_entry *orphan; orphan = list_entry(this, struct orphan_inode_entry, list); if (nentries == F2FS_ORPHANS_PER_BLOCK) { /* * an orphan block is full of 1020 entries, * then we need to flush current orphan blocks * and bring another one in memory */ orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); set_page_dirty(page); f2fs_put_page(page, 1); index++; start_blk++; nentries = 0; page = NULL; } if (page) goto page_exist; page = grab_meta_page(sbi, start_blk); orphan_blk = (struct f2fs_orphan_block *)page_address(page); memset(orphan_blk, 0, sizeof(*orphan_blk)); page_exist: orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); } if (!page) goto end; orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); 
set_page_dirty(page); f2fs_put_page(page, 1); end: mutex_unlock(&sbi->orphan_inode_mutex); } static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr, unsigned long long *version) { struct page *cp_page_1, *cp_page_2 = NULL; unsigned long blk_size = sbi->blocksize; struct f2fs_checkpoint *cp_block; unsigned long long cur_version = 0, pre_version = 0; unsigned int crc = 0; size_t crc_offset; /* Read the 1st cp block in this CP pack */ cp_page_1 = get_meta_page(sbi, cp_addr); /* get the version number */ cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1); crc_offset = le32_to_cpu(cp_block->checksum_offset); if (crc_offset >= blk_size) goto invalid_cp1; crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp1; pre_version = le64_to_cpu(cp_block->checkpoint_ver); /* Read the 2nd cp block in this CP pack */ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; cp_page_2 = get_meta_page(sbi, cp_addr); cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2); crc_offset = le32_to_cpu(cp_block->checksum_offset); if (crc_offset >= blk_size) goto invalid_cp2; crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp2; cur_version = le64_to_cpu(cp_block->checkpoint_ver); if (cur_version == pre_version) { *version = cur_version; f2fs_put_page(cp_page_2, 1); return cp_page_1; } invalid_cp2: f2fs_put_page(cp_page_2, 1); invalid_cp1: f2fs_put_page(cp_page_1, 1); return NULL; } int get_valid_checkpoint(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *cp_block; struct f2fs_super_block *fsb = sbi->raw_super; struct page *cp1, *cp2, *cur_page; unsigned long blk_size = sbi->blocksize; unsigned long long cp1_version = 0, cp2_version = 0; unsigned long long cp_start_blk_no; sbi->ckpt = kzalloc(blk_size, GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* * Finding out valid cp block involves read 
both * sets( cp pack1 and cp pack 2) */ cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); /* The second checkpoint pack should start at the next segment */ cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg); cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); if (cp1 && cp2) { if (ver_after(cp2_version, cp1_version)) cur_page = cp2; else cur_page = cp1; } else if (cp1) { cur_page = cp1; } else if (cp2) { cur_page = cp2; } else { goto fail_no_cp; } cp_block = (struct f2fs_checkpoint *)page_address(cur_page); memcpy(sbi->ckpt, cp_block, blk_size); f2fs_put_page(cp1, 1); f2fs_put_page(cp2, 1); return 0; fail_no_cp: kfree(sbi->ckpt); return -EINVAL; } void set_dirty_dir_page(struct inode *inode, struct page *page) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct list_head *head = &sbi->dir_inode_list; struct dir_inode_entry *new; struct list_head *this; if (!S_ISDIR(inode->i_mode)) return; retry: new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS); if (!new) { cond_resched(); goto retry; } new->inode = inode; INIT_LIST_HEAD(&new->list); spin_lock(&sbi->dir_inode_lock); list_for_each(this, head) { struct dir_inode_entry *entry; entry = list_entry(this, struct dir_inode_entry, list); if (entry->inode == inode) { kmem_cache_free(inode_entry_slab, new); goto out; } } list_add_tail(&new->list, head); sbi->n_dirty_dirs++; BUG_ON(!S_ISDIR(inode->i_mode)); out: inc_page_count(sbi, F2FS_DIRTY_DENTS); inode_inc_dirty_dents(inode); SetPagePrivate(page); spin_unlock(&sbi->dir_inode_lock); } void remove_dirty_dir_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); struct list_head *head = &sbi->dir_inode_list; struct list_head *this; if (!S_ISDIR(inode->i_mode)) return; spin_lock(&sbi->dir_inode_lock); if (atomic_read(&F2FS_I(inode)->dirty_dents)) goto out; list_for_each(this, head) { struct dir_inode_entry *entry; entry = list_entry(this, struct dir_inode_entry, 
list); if (entry->inode == inode) { list_del(&entry->list); kmem_cache_free(inode_entry_slab, entry); sbi->n_dirty_dirs--; break; } } out: spin_unlock(&sbi->dir_inode_lock); } void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) { struct list_head *head = &sbi->dir_inode_list; struct dir_inode_entry *entry; struct inode *inode; retry: spin_lock(&sbi->dir_inode_lock); if (list_empty(head)) { spin_unlock(&sbi->dir_inode_lock); return; } entry = list_entry(head->next, struct dir_inode_entry, list); inode = igrab(entry->inode); spin_unlock(&sbi->dir_inode_lock); if (inode) { filemap_flush(inode->i_mapping); iput(inode); } else { /* * We should submit bio, since it exists several * wribacking dentry pages in the freeing inode. */ f2fs_submit_bio(sbi, DATA, true); } goto retry; } /* * Freeze all the FS-operations for checkpoint. */ static void block_operations(struct f2fs_sb_info *sbi) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .for_reclaim = 0, }; struct blk_plug plug; blk_start_plug(&plug); retry_flush_dents: mutex_lock_all(sbi); /* write all the dirty dentry pages */ if (get_pages(sbi, F2FS_DIRTY_DENTS)) { mutex_unlock_all(sbi); sync_dirty_dir_inodes(sbi); goto retry_flush_dents; } /* * POR: we should ensure that there is no dirty node pages * until finishing nat/sit flush. 
*/ retry_flush_nodes: mutex_lock(&sbi->node_write); if (get_pages(sbi, F2FS_DIRTY_NODES)) { mutex_unlock(&sbi->node_write); sync_node_pages(sbi, 0, &wbc); goto retry_flush_nodes; } blk_finish_plug(&plug); } static void unblock_operations(struct f2fs_sb_info *sbi) { mutex_unlock(&sbi->node_write); mutex_unlock_all(sbi); } static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); nid_t last_nid = 0; block_t start_blk; struct page *cp_page; unsigned int data_sum_blocks, orphan_blocks; unsigned int crc32 = 0; void *kaddr; int i; /* Flush all the NAT/SIT pages */ while (get_pages(sbi, F2FS_DIRTY_META)) sync_meta_pages(sbi, META, LONG_MAX); next_free_nid(sbi, &last_nid); /* * modify checkpoint * version number is already updated */ ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi)); ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); for (i = 0; i < 3; i++) { ckpt->cur_node_segno[i] = cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); ckpt->cur_node_blkoff[i] = cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE)); ckpt->alloc_type[i + CURSEG_HOT_NODE] = curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); } for (i = 0; i < 3; i++) { ckpt->cur_data_segno[i] = cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); ckpt->cur_data_blkoff[i] = cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA)); ckpt->alloc_type[i + CURSEG_HOT_DATA] = curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); } ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); ckpt->next_free_nid = cpu_to_le32(last_nid); /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = npages_for_summary_flush(sbi); if (data_sum_blocks < 3) set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK; 
ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks); if (is_umount) { set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); ckpt->cp_pack_total_block_count = cpu_to_le32(2 + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); } else { clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); ckpt->cp_pack_total_block_count = cpu_to_le32(2 + data_sum_blocks + orphan_blocks); } if (sbi->n_orphans) set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); else clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); /* update SIT/NAT bitmap */ get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset)); *(__le32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset)) = cpu_to_le32(crc32); start_blk = __start_cp_addr(sbi); /* write out checkpoint buffer at block 0 */ cp_page = grab_meta_page(sbi, start_blk++); kaddr = page_address(cp_page); memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); set_page_dirty(cp_page); f2fs_put_page(cp_page, 1); if (sbi->n_orphans) { write_orphan_inodes(sbi, start_blk); start_blk += orphan_blocks; } write_data_summaries(sbi, start_blk); start_blk += data_sum_blocks; if (is_umount) { write_node_summaries(sbi, start_blk); start_blk += NR_CURSEG_NODE_TYPE; } /* writeout checkpoint block */ cp_page = grab_meta_page(sbi, start_blk); kaddr = page_address(cp_page); memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); set_page_dirty(cp_page); f2fs_put_page(cp_page, 1); /* wait for previous submitted node/meta pages writeback */ while (get_pages(sbi, F2FS_WRITEBACK)) congestion_wait(BLK_RW_ASYNC, HZ / 50); filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX); filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX); /* update user_block_counts */ sbi->last_valid_block_count = sbi->total_valid_block_count; sbi->alloc_valid_block_count = 0; /* Here, we only have one bio having CP pack */ sync_meta_pages(sbi, META_FLUSH, LONG_MAX); if (!is_set_ckpt_flags(ckpt, 
CP_ERROR_FLAG)) { clear_prefree_segments(sbi); F2FS_RESET_SB_DIRT(sbi); } } /* * We guarantee that this checkpoint procedure should not fail. */ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long long ckpt_ver; trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops"); mutex_lock(&sbi->cp_mutex); block_operations(sbi); trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); f2fs_submit_bio(sbi, DATA, true); f2fs_submit_bio(sbi, NODE, true); f2fs_submit_bio(sbi, META, true); /* * update checkpoint pack index * Increase the version number so that * SIT entries and seg summaries are written at correct place */ ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver); ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); /* write cached NAT/SIT entries to NAT/SIT area */ flush_nat_entries(sbi); flush_sit_entries(sbi); /* unlock all the fs_lock[] in do_checkpoint() */ do_checkpoint(sbi, is_umount); unblock_operations(sbi); mutex_unlock(&sbi->cp_mutex); trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint"); } void init_orphan_info(struct f2fs_sb_info *sbi) { mutex_init(&sbi->orphan_inode_mutex); INIT_LIST_HEAD(&sbi->orphan_inode_list); sbi->n_orphans = 0; } int __init create_checkpoint_caches(void) { orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry", sizeof(struct orphan_inode_entry), NULL); if (unlikely(!orphan_entry_slab)) return -ENOMEM; inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry", sizeof(struct dir_inode_entry), NULL); if (unlikely(!inode_entry_slab)) { kmem_cache_destroy(orphan_entry_slab); return -ENOMEM; } return 0; } void destroy_checkpoint_caches(void) { kmem_cache_destroy(orphan_entry_slab); kmem_cache_destroy(inode_entry_slab); }
gpl-2.0
choco81/0E_kernel
drivers/staging/iio/trigger/iio-trig-gpio.c
2308
4158
/* * Industrial I/O - gpio based trigger support * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Currently this is more of a functioning proof of concept that a fully * fledged trigger driver. * * TODO: * * Add board config elements to allow specification of startup settings. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> #include "../iio.h" #include "../trigger.h" static LIST_HEAD(iio_gpio_trigger_list); static DEFINE_MUTEX(iio_gpio_trigger_list_lock); struct iio_gpio_trigger_info { struct mutex in_use; unsigned int irq; }; /* * Need to reference count these triggers and only enable gpio interrupts * as appropriate. */ /* So what functionality do we want in here?... */ /* set high / low as interrupt type? 
*/ static irqreturn_t iio_gpio_trigger_poll(int irq, void *private) { /* Timestamp not currently provided */ iio_trigger_poll(private, 0); return IRQ_HANDLED; } static int iio_gpio_trigger_probe(struct platform_device *pdev) { struct iio_gpio_trigger_info *trig_info; struct iio_trigger *trig, *trig2; unsigned long irqflags; struct resource *irq_res; int irq, ret = 0, irq_res_cnt = 0; do { irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_res_cnt); if (irq_res == NULL) { if (irq_res_cnt == 0) dev_err(&pdev->dev, "No GPIO IRQs specified"); break; } irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED; for (irq = irq_res->start; irq <= irq_res->end; irq++) { trig = iio_allocate_trigger("irqtrig%d", irq); if (!trig) { ret = -ENOMEM; goto error_free_completed_registrations; } trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); if (!trig_info) { ret = -ENOMEM; goto error_put_trigger; } trig->private_data = trig_info; trig_info->irq = irq; trig->owner = THIS_MODULE; ret = request_irq(irq, iio_gpio_trigger_poll, irqflags, trig->name, trig); if (ret) { dev_err(&pdev->dev, "request IRQ-%d failed", irq); goto error_free_trig_info; } ret = iio_trigger_register(trig); if (ret) goto error_release_irq; list_add_tail(&trig->alloc_list, &iio_gpio_trigger_list); } irq_res_cnt++; } while (irq_res != NULL); return 0; /* First clean up the partly allocated trigger */ error_release_irq: free_irq(irq, trig); error_free_trig_info: kfree(trig_info); error_put_trigger: iio_put_trigger(trig); error_free_completed_registrations: /* The rest should have been added to the iio_gpio_trigger_list */ list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; free_irq(gpio_to_irq(trig_info->irq), trig); kfree(trig_info); iio_trigger_unregister(trig); } return ret; } static int iio_gpio_trigger_remove(struct platform_device *pdev) { struct iio_trigger *trig, *trig2; struct iio_gpio_trigger_info *trig_info; 
mutex_lock(&iio_gpio_trigger_list_lock); list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; iio_trigger_unregister(trig); free_irq(trig_info->irq, trig); kfree(trig_info); iio_put_trigger(trig); } mutex_unlock(&iio_gpio_trigger_list_lock); return 0; } static struct platform_driver iio_gpio_trigger_driver = { .probe = iio_gpio_trigger_probe, .remove = iio_gpio_trigger_remove, .driver = { .name = "iio_gpio_trigger", .owner = THIS_MODULE, }, }; static int __init iio_gpio_trig_init(void) { return platform_driver_register(&iio_gpio_trigger_driver); } module_init(iio_gpio_trig_init); static void __exit iio_gpio_trig_exit(void) { platform_driver_unregister(&iio_gpio_trigger_driver); } module_exit(iio_gpio_trig_exit); MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem"); MODULE_LICENSE("GPL v2");
gpl-2.0
virt2real/linux-davinci
drivers/usb/serial/hp4x.c
2308
1314
/* * HP4x Calculators Serial USB driver * * Copyright (C) 2005 Arthur Huillet (ahuillet@users.sf.net) * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * See Documentation/usb/usb-serial.txt for more information on using this * driver */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_DESC "HP4x (48/49) Generic Serial driver" #define HP_VENDOR_ID 0x03f0 #define HP49GP_PRODUCT_ID 0x0121 static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver hp49gp_device = { .driver = { .owner = THIS_MODULE, .name = "hp4X", }, .id_table = id_table, .num_ports = 1, }; static struct usb_serial_driver * const serial_drivers[] = { &hp49gp_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
chhapil/Dorimanx-SG2-I9100-Kernel
arch/powerpc/kernel/dma-swiotlb.c
2820
3346
/* * Contains routines needed to support swiotlb for ppc. * * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. * Author: Becky Bruce * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/dma-mapping.h> #include <linux/memblock.h> #include <linux/pfn.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <asm/machdep.h> #include <asm/swiotlb.h> #include <asm/dma.h> unsigned int ppc_swiotlb_enable; static u64 swiotlb_powerpc_get_required(struct device *dev) { u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr; end = memblock_end_of_DRAM(); if (max_direct_dma_addr && end > max_direct_dma_addr) end = max_direct_dma_addr; end += get_dma_offset(dev); mask = 1ULL << (fls64(end) - 1); mask += mask - 1; return mask; } /* * At the moment, all platforms that use this code only require * swiotlb to be used if we're operating on HIGHMEM. Since * we don't ever call anything other than map_sg, unmap_sg, * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. 
*/ struct dma_map_ops swiotlb_dma_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, .map_sg = swiotlb_map_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs, .dma_supported = swiotlb_dma_supported, .map_page = swiotlb_map_page, .unmap_page = swiotlb_unmap_page, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, .get_required_mask = swiotlb_powerpc_get_required, }; void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev) { struct pci_controller *hose; struct dev_archdata *sd; hose = pci_bus_to_host(pdev->bus); sd = &pdev->dev.archdata; sd->max_direct_dma_addr = hose->dma_window_base_cur + hose->dma_window_size; } static int ppc_swiotlb_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct dev_archdata *sd; /* We are only intereted in device addition */ if (action != BUS_NOTIFY_ADD_DEVICE) return 0; sd = &dev->archdata; sd->max_direct_dma_addr = 0; /* May need to bounce if the device can't address all of DRAM */ if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM()) set_dma_ops(dev, &swiotlb_dma_ops); return NOTIFY_DONE; } static struct notifier_block ppc_swiotlb_plat_bus_notifier = { .notifier_call = ppc_swiotlb_bus_notify, .priority = 0, }; int __init swiotlb_setup_bus_notifier(void) { bus_register_notifier(&platform_bus_type, &ppc_swiotlb_plat_bus_notifier); return 0; } void swiotlb_detect_4g(void) { if ((memblock_end_of_DRAM() - 1) > 0xffffffff) ppc_swiotlb_enable = 1; } static int __init swiotlb_late_init(void) { if (ppc_swiotlb_enable) { swiotlb_print_info(); set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; } else { swiotlb_free(); } return 0; } subsys_initcall(swiotlb_late_init);
gpl-2.0
invisiblek/android_kernel_lge_dory
drivers/hid/hid-sjoy.c
3332
4980
/* * Force feedback support for SmartJoy PLUS PS2->USB adapter * * Copyright (c) 2009 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based of hid-pl.c and hid-gaff.c * Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #include <linux/input.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_SMARTJOYPLUS_FF struct sjoyff_device { struct hid_report *report; }; static int hid_sjoyff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct sjoyff_device *sjoyff = data; u32 left, right; left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right); left = left * 0xff / 0xffff; right = (right != 0); /* on/off only */ sjoyff->report->field[0]->value[1] = right; sjoyff->report->field[0]->value[2] = left; dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right); hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT); return 0; } static int sjoyff_init(struct hid_device *hid) { struct sjoyff_device *sjoyff; struct hid_report *report; struct hid_input *hidinput; struct 
list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; struct input_dev *dev; int error; if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; } list_for_each_entry(hidinput, &hid->inputs, list) { report_ptr = report_ptr->next; if (report_ptr == report_list) { hid_err(hid, "required output report is missing\n"); return -ENODEV; } report = list_entry(report_ptr, struct hid_report, list); if (report->maxfield < 1) { hid_err(hid, "no fields in the report\n"); return -ENODEV; } if (report->field[0]->report_count < 3) { hid_err(hid, "not enough values in the field\n"); return -ENODEV; } sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL); if (!sjoyff) return -ENOMEM; dev = hidinput->input; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play); if (error) { kfree(sjoyff); return error; } sjoyff->report = report; sjoyff->report->field[0]->value[0] = 0x01; sjoyff->report->field[0]->value[1] = 0x00; sjoyff->report->field[0]->value[2] = 0x00; hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT); } hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n"); return 0; } #else static inline int sjoyff_init(struct hid_device *hid) { return 0; } #endif static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; hdev->quirks |= id->driver_data; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } sjoyff_init(hdev); return 0; err: return ret; } static const struct hid_device_id sjoy_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO), .driver_data = HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | 
HID_QUIRK_SKIP_OUTPUT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { } }; MODULE_DEVICE_TABLE(hid, sjoy_devices); static struct hid_driver sjoy_driver = { .name = "smartjoyplus", .id_table = sjoy_devices, .probe = sjoy_probe, }; module_hid_driver(sjoy_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jussi Kivilinna");
gpl-2.0
AndroPlus-org/android_kernel_sony_msm8994
arch/ia64/sn/kernel/sn2/prominfo_proc.c
4356
6236
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved. * * Module to export the system's Firmware Interface Tables, including * PROM revision numbers and banners, in /proc */ #include <linux/module.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/nodemask.h> #include <asm/io.h> #include <asm/sn/sn_sal.h> #include <asm/sn/sn_cpuid.h> #include <asm/sn/addrs.h> MODULE_DESCRIPTION("PROM version reporting for /proc"); MODULE_AUTHOR("Chad Talbott"); MODULE_LICENSE("GPL"); /* Standard Intel FIT entry types */ #define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */ #define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */ /* Entries 0x02 through 0x0D reserved by Intel */ #define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */ #define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */ #define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */ #define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */ /* OEM-defined entries range from 0x10 to 0x7E. 
*/ #define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */ #define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */ #define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */ #define FIT_ENTRY_EFI 0x1F /* EFI entry */ #define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */ #define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */ #define FIT_MAJOR_SHIFT (32 + 8) #define FIT_MAJOR_MASK ((1 << 8) - 1) #define FIT_MINOR_SHIFT 32 #define FIT_MINOR_MASK ((1 << 8) - 1) #define FIT_MAJOR(q) \ ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK) #define FIT_MINOR(q) \ ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK) #define FIT_TYPE_SHIFT (32 + 16) #define FIT_TYPE_MASK ((1 << 7) - 1) #define FIT_TYPE(q) \ ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK) struct fit_type_map_t { unsigned char type; const char *name; }; static const struct fit_type_map_t fit_entry_types[] = { {FIT_ENTRY_FIT_HEADER, "FIT Header"}, {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"}, {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"}, {FIT_ENTRY_PAL_A, "PAL_A"}, {FIT_ENTRY_PAL_B, "PAL_B"}, {FIT_ENTRY_SAL_A, "SAL_A"}, {FIT_ENTRY_SAL_B, "SAL_B"}, {FIT_ENTRY_SALRUNTIME, "SAL runtime"}, {FIT_ENTRY_EFI, "EFI"}, {FIT_ENTRY_VMLINUX, "Embedded Linux"}, {FIT_ENTRY_FPSWA, "Embedded FPSWA"}, {FIT_ENTRY_UNUSED, "Unused"}, {0xff, "Error"}, }; static const char *fit_type_name(unsigned char type) { struct fit_type_map_t const *mapp; for (mapp = fit_entry_types; mapp->type != 0xff; mapp++) if (type == mapp->type) return mapp->name; if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED)) return "OEM type"; if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A)) return "Reserved"; return "Unknown type"; } static int get_fit_entry(unsigned long nasid, int index, unsigned long *fentry, char *banner, int banlen) { return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen); } /* * These two routines display the FIT table for each node. 
*/ static void dump_fit_entry(struct seq_file *m, unsigned long *fentry) { unsigned type; type = FIT_TYPE(fentry[1]); seq_printf(m, "%02x %-25s %x.%02x %016lx %u\n", type, fit_type_name(type), FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]), fentry[0], /* mult by sixteen to get size in bytes */ (unsigned)(fentry[1] & 0xffffff) * 16); } /* * We assume that the fit table will be small enough that we can print * the whole thing into one page. (This is true for our default 16kB * pages -- each entry is about 60 chars wide when printed.) I read * somewhere that the maximum size of the FIT is 128 entries, so we're * OK except for 4kB pages (and no one is going to do that on SN * anyway). */ static int proc_fit_show(struct seq_file *m, void *v) { unsigned long nasid = (unsigned long)m->private; unsigned long fentry[2]; int index; for (index=0;;index++) { BUG_ON(index * 60 > PAGE_SIZE); if (get_fit_entry(nasid, index, fentry, NULL, 0)) break; dump_fit_entry(m, fentry); } return 0; } static int proc_fit_open(struct inode *inode, struct file *file) { return single_open(file, proc_fit_show, PDE_DATA(inode)); } static const struct file_operations proc_fit_fops = { .open = proc_fit_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int proc_version_show(struct seq_file *m, void *v) { unsigned long nasid = (unsigned long)m->private; unsigned long fentry[2]; char banner[128]; int index; for (index = 0; ; index++) { if (get_fit_entry(nasid, index, fentry, banner, sizeof(banner))) return 0; if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A) break; } seq_printf(m, "%x.%02x\n", FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1])); if (banner[0]) seq_printf(m, "%s\n", banner); return 0; } static int proc_version_open(struct inode *inode, struct file *file) { return single_open(file, proc_version_show, PDE_DATA(inode)); } static const struct file_operations proc_version_fops = { .open = proc_version_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* 
module entry points */ int __init prominfo_init(void); void __exit prominfo_exit(void); module_init(prominfo_init); module_exit(prominfo_exit); #define NODE_NAME_LEN 11 int __init prominfo_init(void) { struct proc_dir_entry *sgi_prominfo_entry; cnodeid_t cnodeid; if (!ia64_platform_is("sn2")) return 0; sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL); if (!sgi_prominfo_entry) return -ENOMEM; for_each_online_node(cnodeid) { struct proc_dir_entry *dir; unsigned long nasid; char name[NODE_NAME_LEN]; sprintf(name, "node%d", cnodeid); dir = proc_mkdir(name, sgi_prominfo_entry); if (!dir) continue; nasid = cnodeid_to_nasid(cnodeid); proc_create_data("fit", 0, dir, &proc_fit_fops, (void *)nasid); proc_create_data("version", 0, dir, &proc_version_fops, (void *)nasid); } return 0; } void __exit prominfo_exit(void) { remove_proc_subtree("sgi_prominfo", NULL); }
gpl-2.0
L-Aosp/android_kernel_motorola_msm8226
arch/arm/mach-omap2/clock2430.c
4868
1880
/* * clock2430.c - OMAP2430-specific clock integration code * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/hardware.h> #include <plat/clock.h> #include "iomap.h" #include "clock.h" #include "clock2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /** * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS * @clk: struct clk * being enabled * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator * * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function * passes back the correct CM_IDLEST register address for I2CHS * modules. No return value. */ static void omap2430_clk_i2chs_find_idlest(struct clk *clk, void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val) { *idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST); *idlest_bit = clk->enable_bit; *idlest_val = OMAP24XX_CM_IDLEST_VAL; } /* 2430 I2CHS has non-standard IDLEST register */ const struct clkops clkops_omap2430_i2chs_wait = { .enable = omap2_dflt_clk_enable, .disable = omap2_dflt_clk_disable, .find_idlest = omap2430_clk_i2chs_find_idlest, .find_companion = omap2_clk_dflt_find_companion, };
gpl-2.0
byzvulture/android_kernel_nubia_nx507j
arch/arm/mach-omap2/opp2420_data.c
4868
5022
/*
 * opp2420_data.c - old-style "OPP" table for OMAP2420
 *
 * Copyright (C) 2005-2009 Texas Instruments, Inc.
 * Copyright (C) 2004-2009 Nokia Corporation
 *
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
 * These configurations are characterized by voltage and speed for clocks.
 * The device is only validated for certain combinations. One way to express
 * these combinations is via the 'ratios' which the clocks operate with
 * respect to each other. These ratio sets are for a given voltage/DPLL
 * setting. All configurations can be described by a DPLL setting and a ratio.
 *
 * XXX Missing voltage data.
 * XXX Missing 19.2MHz sys_clk rate sets (needed for N800/N810)
 *
 * The format described in this file is deprecated. Once a reasonable
 * OPP API exists, the data in this file should be converted to use it.
 *
 * This is technically part of the OMAP2xxx clock code.
 *
 * Considerable work is still needed to fully support dynamic frequency
 * changes on OMAP2xxx-series chips. Readers interested in such a
 * project are encouraged to review the Maemo Diablo RX-34 and RX-44
 * kernel source at:
 *     http://repository.maemo.org/pool/diablo/free/k/kernel-source-diablo/
 */

#include <plat/hardware.h>

#include "opp2xxx.h"
#include "sdrc.h"
#include "clock.h"

/*
 * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
 * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
 * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
 * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
 *
 * Filling in table based on H4 boards available. There are quite a
 * few more rate combinations which could be defined.
 *
 * When multiple values are defined the start up will try and choose
 * the fastest one. If a 'fast' value is defined, then automatically,
 * the /2 one should be included as it can be used. Generally having
 * more than one fast set does not make sense, as static timings need
 * to be changed to change the set. The exception is the bypass
 * setting which is available for low power bypass.
 *
 * Note: This table needs to be sorted, fastest to slowest.
 */
const struct prcm_config omap2420_rate_table[] = {
	/* PRCM I - FAST */
	{S12M, S660M, S330M, RI_CM_CLKSEL_MPU_VAL,		/* 330MHz ARM */
		RI_CM_CLKSEL_DSP_VAL, RI_CM_CLKSEL_GFX_VAL,
		RI_CM_CLKSEL1_CORE_VAL, MI_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_165MHz,
		RATE_IN_242X},

	/* PRCM II - FAST */
	{S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL,		/* 300MHz ARM */
		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
		RATE_IN_242X},

	{S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL,		/* 300MHz ARM */
		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
		RATE_IN_242X},

	/* PRCM III - FAST */
	{S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
		RATE_IN_242X},

	{S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL,		/* 266MHz ARM */
		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
		RATE_IN_242X},

	/* PRCM II - SLOW */
	{S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL,		/* 150MHz ARM */
		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
		RATE_IN_242X},

	{S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL,		/* 150MHz ARM */
		RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
		RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
		RATE_IN_242X},

	/* PRCM III - SLOW */
	{S12M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
		RATE_IN_242X},

	{S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL,		/* 133MHz ARM */
		RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
		RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
		RATE_IN_242X},

	/* PRCM-VII (boot-bypass) */
	{S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL,		/* 12MHz ARM */
		RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
		RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
		RATE_IN_242X},

	/* PRCM-VII (boot-bypass) */
	{S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL,		/* 13MHz ARM */
		RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
		RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL,
		MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
		RATE_IN_242X},

	/* Terminator: all-zero entry marks the end of the table. */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
};
gpl-2.0
Snuzzo/funky_dna_old
drivers/staging/iio/accel/adis16204_trigger.c
5124
1684
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/export.h>

#include "../iio.h"
#include "../trigger.h"
#include "adis16204.h"

/**
 * adis16204_data_rdy_trigger_set_state() - set datardy interrupt state
 * @trig: the trigger whose state is being changed
 * @state: true to enable the data-ready interrupt, false to disable
 */
static int adis16204_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
	return adis16204_set_irq(indio_dev, state);
}

static const struct iio_trigger_ops adis16204_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &adis16204_data_rdy_trigger_set_state,
};

/**
 * adis16204_probe_trigger() - allocate and register the data-ready trigger
 * @indio_dev: device the trigger belongs to
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (trigger, irq) are released.
 */
int adis16204_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct adis16204_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("adis16204-dev%d", indio_dev->id);
	if (st->trig == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	ret = request_irq(st->us->irq,
			  &iio_trigger_generic_data_rdy_poll,
			  IRQF_TRIGGER_RISING,
			  "adis16204",
			  st->trig);
	if (ret)
		goto error_free_trig;

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &adis16204_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_irq;

	/*
	 * Select the default trigger only after registration succeeded;
	 * previously this was assigned before the error check, leaving
	 * indio_dev->trig pointing at a trigger that was about to be freed
	 * on the error path.
	 */
	indio_dev->trig = st->trig;

	return 0;

error_free_irq:
	free_irq(st->us->irq, st->trig);
error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}

/**
 * adis16204_remove_trigger() - tear down the data-ready trigger
 * @indio_dev: device whose trigger is being removed
 */
void adis16204_remove_trigger(struct iio_dev *indio_dev)
{
	struct adis16204_state *state = iio_priv(indio_dev);

	iio_trigger_unregister(state->trig);
	free_irq(state->us->irq, state->trig);
	iio_free_trigger(state->trig);
}
gpl-2.0
bekriebel/android_kernel_omap
drivers/staging/iio/accel/adis16240_trigger.c
5124
1885
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/export.h>

#include "../iio.h"
#include "../trigger.h"
#include "adis16240.h"

/**
 * adis16240_data_rdy_trig_poll() - irq handler for the data-ready trigger
 * @irq: interrupt number (unused)
 * @trig: the iio_trigger registered with request_irq()
 */
static irqreturn_t adis16240_data_rdy_trig_poll(int irq, void *trig)
{
	iio_trigger_poll(trig, iio_get_time_ns());
	return IRQ_HANDLED;
}

/**
 * adis16240_data_rdy_trigger_set_state() - set datardy interrupt state
 * @trig: the trigger whose state is being changed
 * @state: true to enable the data-ready interrupt, false to disable
 */
static int adis16240_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
	return adis16240_set_irq(indio_dev, state);
}

static const struct iio_trigger_ops adis16240_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &adis16240_data_rdy_trigger_set_state,
};

/**
 * adis16240_probe_trigger() - allocate and register the data-ready trigger
 * @indio_dev: device the trigger belongs to
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (trigger, irq) are released.
 */
int adis16240_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct adis16240_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("adis16240-dev%d", indio_dev->id);
	if (st->trig == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	ret = request_irq(st->us->irq,
			  adis16240_data_rdy_trig_poll,
			  IRQF_TRIGGER_RISING,
			  "adis16240",
			  st->trig);
	if (ret)
		goto error_free_trig;

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &adis16240_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_irq;

	/*
	 * Select the default trigger only after registration succeeded;
	 * previously this was assigned before the error check, leaving
	 * indio_dev->trig pointing at a trigger that was about to be freed
	 * on the error path.
	 */
	indio_dev->trig = st->trig;

	return 0;

error_free_irq:
	free_irq(st->us->irq, st->trig);
error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}

/**
 * adis16240_remove_trigger() - tear down the data-ready trigger
 * @indio_dev: device whose trigger is being removed
 */
void adis16240_remove_trigger(struct iio_dev *indio_dev)
{
	struct adis16240_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	free_irq(st->us->irq, st->trig);
	iio_free_trigger(st->trig);
}
gpl-2.0
lawnn/android_kernel_sony_msm8974
arch/arm/mach-imx/ehci-imx27.c
7940
2276
/*
 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/platform_device.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <mach/mxc_ehci.h>

#define USBCTRL_OTGBASE_OFFSET	0x600

#define MX27_OTG_SIC_SHIFT	29
#define MX27_OTG_SIC_MASK	(0x3 << MX27_OTG_SIC_SHIFT)
#define MX27_OTG_PM_BIT		(1 << 24)

#define MX27_H2_SIC_SHIFT	21
#define MX27_H2_SIC_MASK	(0x3 << MX27_H2_SIC_SHIFT)
#define MX27_H2_PM_BIT		(1 << 16)
#define MX27_H2_DT_BIT		(1 << 5)

#define MX27_H1_SIC_SHIFT	13
#define MX27_H1_SIC_MASK	(0x3 << MX27_H1_SIC_SHIFT)
#define MX27_H1_PM_BIT		(1 << 8)
#define MX27_H1_DT_BIT		(1 << 4)

/*
 * mx27_initialize_usb_hw - configure one i.MX27 USB port
 * @port:  0 = OTG, 1 = Host 1, 2 = Host 2
 * @flags: MXC_EHCI_* configuration flags
 *
 * Programs the serial-interface-config field, the power-mask bit and
 * (for the host ports) the differential/single-ended transceiver bit
 * in the USB control register.  Returns 0, or -EINVAL for an unknown
 * port number.
 */
int mx27_initialize_usb_hw(int port, unsigned int flags)
{
	unsigned int reg;
	unsigned int sic_shift, pm_bit, dt_bit;

	reg = readl(MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));

	switch (port) {
	case 0:	/* OTG port: has no transceiver-type (DT) bit */
		sic_shift = MX27_OTG_SIC_SHIFT;
		pm_bit = MX27_OTG_PM_BIT;
		dt_bit = 0;
		break;
	case 1:	/* Host 1 port */
		sic_shift = MX27_H1_SIC_SHIFT;
		pm_bit = MX27_H1_PM_BIT;
		dt_bit = MX27_H1_DT_BIT;
		break;
	case 2:	/* Host 2 port */
		sic_shift = MX27_H2_SIC_SHIFT;
		pm_bit = MX27_H2_PM_BIT;
		dt_bit = MX27_H2_DT_BIT;
		break;
	default:
		return -EINVAL;
	}

	/* Clear the port's fields, then rebuild them from @flags. */
	reg &= ~((0x3 << sic_shift) | pm_bit | dt_bit);
	reg |= (flags & MXC_EHCI_INTERFACE_MASK) << sic_shift;
	if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
		reg |= pm_bit;
	if (dt_bit && !(flags & MXC_EHCI_TTL_ENABLED))
		reg |= dt_bit;

	writel(reg, MX27_IO_ADDRESS(MX27_USB_BASE_ADDR + USBCTRL_OTGBASE_OFFSET));

	return 0;
}
gpl-2.0
zanezam/boeffla-kernel-oos-bacon
drivers/mtd/maps/ts5500_flash.c
11524
2857
/*
 * ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
 *
 * Copyright (C) 2004 Sean Young <sean@mess.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 * Note:
 * - In order for detection to work, jumper 3 must be set.
 * - Drive A and B use the resident flash disk (RFD) flash translation layer.
 * - If you have created your own jffs file system and the bios overwrites
 *   it during boot, try disabling Drive A: and B: in the boot order.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/types.h>

#define WINDOW_ADDR	0x09400000
#define WINDOW_SIZE	0x00200000

static struct map_info ts5500_map = {
	.name = "TS-5500 Flash",
	.size = WINDOW_SIZE,
	.bankwidth = 1,
	.phys = WINDOW_ADDR
};

static struct mtd_partition ts5500_partitions[] = {
	{
		.name = "Drive A",
		.offset = 0,
		.size = 0x0e0000
	},
	{
		.name = "BIOS",
		.offset = 0x0e0000,
		.size = 0x020000,
	},
	{
		.name = "Drive B",
		.offset = 0x100000,
		.size = 0x100000
	}
};

#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)

static struct mtd_info *mymtd;

/*
 * Map the flash window, probe it (JEDEC first, ROM as fallback) and
 * register the fixed partition layout.  Returns 0 on success or a
 * negative errno; all resources are released on failure.
 */
static int __init init_ts5500_map(void)
{
	int rc = 0;

	ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
	if (!ts5500_map.virt) {
		printk(KERN_ERR "Failed to ioremap_nocache\n");
		rc = -EIO;
		goto err2;
	}

	simple_map_init(&ts5500_map);

	mymtd = do_map_probe("jedec_probe", &ts5500_map);
	if (!mymtd)
		mymtd = do_map_probe("map_rom", &ts5500_map);
	if (!mymtd) {
		rc = -ENXIO;
		goto err1;
	}

	mymtd->owner = THIS_MODULE;
	/*
	 * Previously the return value of mtd_device_register() was ignored,
	 * so a registration failure left the module loaded with no usable
	 * device and leaked the probed map.
	 */
	rc = mtd_device_register(mymtd, ts5500_partitions, NUM_PARTITIONS);
	if (rc) {
		map_destroy(mymtd);
		mymtd = NULL;
		goto err1;
	}

	return 0;

err1:
	iounmap(ts5500_map.virt);
	ts5500_map.virt = NULL;
err2:
	return rc;
}

/* Unregister the MTD device and unmap the flash window. */
static void __exit cleanup_ts5500_map(void)
{
	if (mymtd) {
		mtd_device_unregister(mymtd);
		map_destroy(mymtd);
	}

	if (ts5500_map.virt) {
		iounmap(ts5500_map.virt);
		ts5500_map.virt = NULL;
	}
}

module_init(init_ts5500_map);
module_exit(cleanup_ts5500_map);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("MTD map driver for Techology Systems TS-5500 board");
gpl-2.0
kalev/PackageKit
backends/katja/katja-binary.c
5
8469
#include "katja-binary.h"

/**
 * katja_pkgtools_iface_init:
 **/
static void katja_pkgtools_iface_init(KatjaPkgtoolsInterface *iface) {
	iface->get_name = katja_binary_real_get_name;
	iface->get_mirror = katja_binary_real_get_mirror;
	iface->get_order = katja_binary_real_get_order;
	iface->get_blacklist = katja_binary_real_get_blacklist;
	iface->collect_cache_info = (GSList *(*)(KatjaPkgtools *, const gchar *)) katja_binary_collect_cache_info;
	iface->generate_cache = (void (*)(KatjaPkgtools *, PkBackendJob *, const gchar *)) katja_binary_generate_cache;
	iface->download = katja_binary_real_download;
	iface->install = katja_binary_real_install;
}

G_DEFINE_TYPE_WITH_CODE(KatjaBinary, katja_binary, G_TYPE_OBJECT,
						G_IMPLEMENT_INTERFACE(KATJA_TYPE_PKGTOOLS, katja_pkgtools_iface_init));

/**
 * katja_binary_collect_cache_info:
 *
 * Dispatches to the subclass implementation.  Returns NULL if the
 * object or the vfunc is missing.
 **/
GSList *katja_binary_collect_cache_info(KatjaBinary *binary, const gchar *tmpl) {
	g_return_val_if_fail(KATJA_IS_BINARY(binary), NULL);
	g_return_val_if_fail(KATJA_BINARY_GET_CLASS(binary)->collect_cache_info != NULL, NULL);

	return KATJA_BINARY_GET_CLASS(binary)->collect_cache_info(binary, tmpl);
}

/**
 * katja_binary_generate_cache:
 *
 * Dispatches to the subclass implementation.
 **/
void katja_binary_generate_cache(KatjaBinary *binary, PkBackendJob *job, const gchar *tmpl) {
	g_return_if_fail(KATJA_IS_BINARY(binary));
	g_return_if_fail(KATJA_BINARY_GET_CLASS(binary)->generate_cache != NULL);

	KATJA_BINARY_GET_CLASS(binary)->generate_cache(binary, job, tmpl);
}

/**
 * katja_binary_manifest:
 *
 * Parses a bzip2-compressed MANIFEST file and inserts one row into the
 * "filelist" table for every file belonging to every package found.
 * Errors are silently ignored (best effort), matching the original
 * behaviour.
 **/
void katja_binary_manifest(KatjaBinary *binary, PkBackendJob *job, const gchar *tmpl, gchar *filename) {
	FILE *manifest;
	gint err, read_len;
	guint pos;
	gchar buf[KATJA_PKGTOOLS_MAX_BUF_SIZE], *path, *full_name = NULL, *pkg_filename, *rest = NULL, *start;
	gchar **line, **lines;
	BZFILE *manifest_bz2;
	GRegex *pkg_expr = NULL, *file_expr = NULL;
	GMatchInfo *match_info = NULL;
	sqlite3_stmt *statement = NULL;
	PkBackendKatjaJobData *job_data = pk_backend_job_get_user_data(job);

	path = g_build_filename(tmpl, binary->name, filename, NULL);
	manifest = fopen(path, "rb");
	g_free(path);
	if (!manifest)
		return;
	if (!(manifest_bz2 = BZ2_bzReadOpen(&err, manifest, 0, 0, NULL, 0)))
		goto out;

	/* Prepare regular expressions */
	if (!(pkg_expr = g_regex_new("^\\|\\|[[:blank:]]+Package:[[:blank:]]+.+\\/(.+)\\.(t[blxg]z$)?",
								 G_REGEX_OPTIMIZE | G_REGEX_DUPNAMES,
								 0,
								 NULL)) ||
		!(file_expr = g_regex_new("^[-bcdlps][-r][-w][-xsS][-r][-w][-xsS][-r][-w][-xtT][[:space:]][^[:space:]]+"
								  "[[:space:]]+[[:digit:]]+[[:space:]][[:digit:]-]+[[:space:]][[:digit:]:]+[[:space:]]"
								  "(?!install\\/|\\.)(.*)",
								  G_REGEX_OPTIMIZE | G_REGEX_DUPNAMES,
								  0,
								  NULL)))
		goto out;

	/* Prepare SQL statements */
	if (sqlite3_prepare_v2(job_data->db,
						   "INSERT INTO filelist (full_name, filename) VALUES (@full_name, @filename)",
						   -1,
						   &statement,
						   NULL) != SQLITE_OK)
		goto out;

	sqlite3_exec(job_data->db, "BEGIN TRANSACTION", NULL, NULL, NULL);

	/* Read one byte less than the buffer size so there is always room for
	 * a terminating NUL: BZ2_bzRead() returns raw bytes, but g_strsplit()
	 * below requires a NUL-terminated string (previously the full buffer
	 * was read and split without termination — a buffer over-read). */
	while ((read_len = BZ2_bzRead(&err, manifest_bz2, buf, KATJA_PKGTOOLS_MAX_BUF_SIZE - 1))) {
		if ((err != BZ_OK) && (err != BZ_STREAM_END))
			break;
		buf[read_len] = '\0';

		/* Split the read text into lines */
		lines = g_strsplit(buf, "\n", 0);
		if (rest) {
			/* Add to the first line rest characters from the previous read operation */
			start = lines[0];
			lines[0] = g_strconcat(rest, lines[0], NULL);
			g_free(start);
			g_free(rest);
			rest = NULL;
		}
		if (err != BZ_STREAM_END) {
			/* The last line can be incomplete; keep it for the next chunk */
			pos = g_strv_length(lines) - 1;
			rest = lines[pos];
			lines[pos] = NULL;
		}
		for (line = lines; *line; line++) {
			if (g_regex_match(pkg_expr, *line, 0, &match_info)) {
				if (g_match_info_get_match_count(match_info) > 2) {
					/* The extension matched: remember the package name */
					g_free(full_name);
					full_name = g_match_info_fetch(match_info, 1);
				} else {
					/* Free the previous name before dropping the pointer
					 * (previously leaked here). */
					g_free(full_name);
					full_name = NULL;
				}
			}
			g_match_info_free(match_info);
			match_info = NULL;

			if (full_name && g_regex_match(file_expr, *line, 0, &match_info)) {
				pkg_filename = g_match_info_fetch(match_info, 1);
				sqlite3_bind_text(statement, 1, full_name, -1, SQLITE_TRANSIENT);
				sqlite3_bind_text(statement, 2, pkg_filename, -1, SQLITE_TRANSIENT);
				sqlite3_step(statement);
				sqlite3_clear_bindings(statement);
				sqlite3_reset(statement);
				g_free(pkg_filename);
			}
			g_match_info_free(match_info);
			match_info = NULL;
		}
		g_strfreev(lines);
	}
	sqlite3_exec(job_data->db, "END TRANSACTION", NULL, NULL, NULL);

	g_free(full_name);
	g_free(rest);
	BZ2_bzReadClose(&err, manifest_bz2);

out:
	sqlite3_finalize(statement);
	if (file_expr)
		g_regex_unref(file_expr);
	if (pkg_expr)
		g_regex_unref(pkg_expr);

	fclose(manifest);
}

/**
 * katja_binary_real_get_name:
 **/
gchar *katja_binary_real_get_name(KatjaPkgtools *pkgtools) {
	return KATJA_BINARY(pkgtools)->name;
}

/**
 * katja_binary_real_get_mirror:
 **/
gchar *katja_binary_real_get_mirror(KatjaPkgtools *pkgtools) {
	return KATJA_BINARY(pkgtools)->mirror;
}

/**
 * katja_binary_real_get_order:
 **/
gushort katja_binary_real_get_order(KatjaPkgtools *pkgtools) {
	return KATJA_BINARY(pkgtools)->order;
}

/**
 * katja_binary_real_get_blacklist:
 **/
GRegex *katja_binary_real_get_blacklist(KatjaPkgtools *pkgtools) {
	return KATJA_BINARY(pkgtools)->blacklist;
}

/**
 * katja_binary_real_download:
 *
 * Looks the package up in the "pkglist" table and downloads it into
 * @dest_dir_name unless the file is already there.  Returns TRUE on
 * success.
 **/
gboolean katja_binary_real_download(KatjaPkgtools *pkgtools, PkBackendJob *job,
									gchar *dest_dir_name,
									gchar *pkg_name) {
	gchar *dest_filename, *source_url;
	gboolean ret = FALSE;
	sqlite3_stmt *statement = NULL;
	CURL *curl = NULL;
	PkBackendKatjaJobData *job_data = pk_backend_job_get_user_data(job);

	if ((sqlite3_prepare_v2(job_data->db,
							"SELECT location, (full_name || '.' || ext) FROM pkglist "
							"WHERE name LIKE @name AND repo_order = @repo_order",
							-1,
							&statement,
							NULL) != SQLITE_OK))
		return FALSE;

	sqlite3_bind_text(statement, 1, pkg_name, -1, SQLITE_TRANSIENT);
	sqlite3_bind_int(statement, 2, katja_pkgtools_get_order(pkgtools));

	if (sqlite3_step(statement) == SQLITE_ROW) {
		dest_filename = g_build_filename(dest_dir_name, sqlite3_column_text(statement, 1), NULL);
		source_url = g_strconcat(katja_pkgtools_get_mirror(pkgtools),
								 sqlite3_column_text(statement, 0),
								 "/",
								 sqlite3_column_text(statement, 1),
								 NULL);

		if (!g_file_test(dest_filename, G_FILE_TEST_EXISTS)) {
			if (katja_get_file(&curl, source_url, dest_filename) == CURLE_OK)
				ret = TRUE;
		} else {
			/* Already downloaded */
			ret = TRUE;
		}

		if (curl)
			curl_easy_cleanup(curl);
		g_free(source_url);
		g_free(dest_filename);
	}
	sqlite3_finalize(statement);

	return ret;
}

/**
 * katja_binary_real_install:
 *
 * Looks the downloaded package file up and installs it by spawning
 * /sbin/upgradepkg synchronously.
 **/
void katja_binary_real_install(KatjaPkgtools *pkgtools, PkBackendJob *job, gchar *pkg_name) {
	gchar *pkg_filename, *cmd_line;
	sqlite3_stmt *statement = NULL;
	PkBackendKatjaJobData *job_data = pk_backend_job_get_user_data(job);

	if ((sqlite3_prepare_v2(job_data->db,
							"SELECT (full_name || '.' || ext) FROM pkglist "
							"WHERE name LIKE @name AND repo_order = @repo_order",
							-1,
							&statement,
							NULL) != SQLITE_OK))
		return;

	sqlite3_bind_text(statement, 1, pkg_name, -1, SQLITE_TRANSIENT);
	sqlite3_bind_int(statement, 2, katja_pkgtools_get_order(pkgtools));

	if (sqlite3_step(statement) == SQLITE_ROW) {
		pkg_filename = g_build_filename(LOCALSTATEDIR,
										"cache",
										"PackageKit",
										"downloads",
										sqlite3_column_text(statement, 0),
										NULL);
		cmd_line = g_strconcat("/sbin/upgradepkg --install-new ", pkg_filename, NULL);
		g_spawn_command_line_sync(cmd_line, NULL, NULL, NULL, NULL);
		g_free(cmd_line);
		g_free(pkg_filename);
	}
	sqlite3_finalize(statement);
}

/**
 * katja_binary_finalize:
 **/
static void katja_binary_finalize(GObject *object) {
	KatjaBinary *binary;

	binary = KATJA_BINARY(object);
	g_free(binary->name);
	g_free(binary->mirror);
	/* blacklist is a GRegex, which is reference-counted but NOT a GObject;
	 * it must be released with g_regex_unref(), not g_object_unref(). */
	if (binary->blacklist)
		g_regex_unref(binary->blacklist);

	G_OBJECT_CLASS(katja_binary_parent_class)->finalize(object);
}

/**
 * katja_binary_class_init:
 **/
static void katja_binary_class_init(KatjaBinaryClass *klass) {
	GObjectClass *object_class = G_OBJECT_CLASS(klass);

	object_class->finalize = katja_binary_finalize;
}

/**
 * katja_binary_init:
 **/
static void katja_binary_init(KatjaBinary *binary) {
	binary->name = NULL;
	binary->mirror = NULL;
	binary->blacklist = NULL;
	binary->order = 0;
}
gpl-2.0
FlightGear/flightgear
3rdparty/iaxclient/lib/portaudio/test/patest1.c
5
6612
/** @file patest1.c @brief Ring modulate the audio input with a sine wave for 20 seconds. @author Ross Bencina <rossb@audiomulch.com> */ /* * $Id: patest1.c,v 1.1 2006/06/10 21:30:56 dmazzoni Exp $ * * This program uses the PortAudio Portable Audio Library. * For more information see: http://www.portaudio.com * Copyright (c) 1999-2000 Ross Bencina and Phil Burk * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files * (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * Any person wishing to distribute modifications to the Software is * requested to send the modifications to the original developer so that * they can be incorporated into the canonical version. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <stdio.h> #include <math.h> #include "portaudio.h" #ifndef M_PI #define M_PI (3.14159265) #endif #define SAMPLE_RATE (44100) typedef struct { float sine[100]; int phase; int sampsToGo; } patest1data; static int patest1Callback( const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, void *userData ) { patest1data *data = (patest1data*)userData; float *in = (float*)inputBuffer; float *out = (float*)outputBuffer; int framesToCalc = framesPerBuffer; unsigned long i = 0; int finished; if( data->sampsToGo < framesPerBuffer ) { framesToCalc = data->sampsToGo; finished = paComplete; } else { finished = paContinue; } for( ; i<framesToCalc; i++ ) { *out++ = *in++ * data->sine[data->phase]; /* left */ *out++ = *in++ * data->sine[data->phase++]; /* right */ if( data->phase >= 100 ) data->phase = 0; } data->sampsToGo -= framesToCalc; /* zero remainder of final buffer if not already done */ for( ; i<framesPerBuffer; i++ ) { *out++ = 0; /* left */ *out++ = 0; /* right */ } return finished; } int main(int argc, char* argv[]); int main(int argc, char* argv[]) { PaStream *stream; PaError err; patest1data data; int i; PaStreamParameters inputParameters, outputParameters; const PaHostErrorInfo* herr; printf("patest1.c\n"); fflush(stdout); printf("Ring modulate input for 20 seconds.\n"); fflush(stdout); /* initialise sinusoidal wavetable */ for( i=0; i<100; i++ ) data.sine[i] = sin( ((double)i/100.) * M_PI * 2. ); data.phase = 0; data.sampsToGo = SAMPLE_RATE * 20; /* 20 seconds. 
*/ /* initialise portaudio subsytem */ err = Pa_Initialize(); inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */ inputParameters.channelCount = 2; /* stereo input */ inputParameters.sampleFormat = paFloat32; /* 32 bit floating point input */ inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputParameters.device )->defaultLowInputLatency; inputParameters.hostApiSpecificStreamInfo = NULL; outputParameters.device = Pa_GetDefaultOutputDevice(); /* default output device */ outputParameters.channelCount = 2; /* stereo output */ outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */ outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency; outputParameters.hostApiSpecificStreamInfo = NULL; err = Pa_OpenStream( &stream, &inputParameters, &outputParameters, (double)SAMPLE_RATE, /* Samplerate in Hertz. */ 512, /* Small buffers */ paClipOff, /* We won't output out of range samples so don't bother clipping them. 
*/ patest1Callback, &data ); if( err != paNoError ) goto done; err = Pa_StartStream( stream ); if( err != paNoError ) goto done; printf( "Press any key to end.\n" ); fflush(stdout); getc( stdin ); /* wait for input before exiting */ err = Pa_AbortStream( stream ); if( err != paNoError ) goto done; printf( "Waiting for stream to complete...\n" ); /* sleep until playback has finished */ while( ( err = Pa_IsStreamActive( stream ) ) == 1 ) Pa_Sleep(1000); if( err < 0 ) goto done; err = Pa_CloseStream( stream ); if( err != paNoError ) goto done; done: Pa_Terminate(); if( err != paNoError ) { fprintf( stderr, "An error occured while using portaudio\n" ); if( err == paUnanticipatedHostError ) { fprintf( stderr, " unanticipated host error.\n"); herr = Pa_GetLastHostErrorInfo(); if (herr) { fprintf( stderr, " Error number: %ld\n", herr->errorCode ); if (herr->errorText) fprintf( stderr, " Error text: %s\n", herr->errorText ); } else fprintf( stderr, " Pa_GetLastHostErrorInfo() failed!\n" ); } else { fprintf( stderr, " Error number: %d\n", err ); fprintf( stderr, " Error text: %s\n", Pa_GetErrorText( err ) ); } err = 1; /* Always return 0 or 1, but no other return codes. */ } printf( "bye\n" ); return err; }
gpl-2.0
JLaferri/dolphin
Source/Core/Core/DSP/Interpreter/DSPIntMisc.cpp
5
3945
// Copyright 2009 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
//
// Additional copyrights go to Duddie and Tratax (c) 2004

#include "Core/DSP/DSPCore.h"
#include "Core/DSP/DSPMemoryMap.h"
#include "Core/DSP/DSPTables.h"
#include "Core/DSP/Interpreter/DSPIntUtil.h"
#include "Core/DSP/Interpreter/DSPInterpreter.h"

namespace DSP::Interpreter
{
// MRR $D, $S
// 0001 11dd ddds ssss
// Move value from register $S to register $D.
// Accumulator-mid sources are read through the saturating path.
void mrr(const UDSPInstruction opc)
{
  u8 sreg = opc & 0x1f;
  u8 dreg = (opc >> 5) & 0x1f;

  if (sreg >= DSP_REG_ACM0)
    dsp_op_write_reg(dreg, dsp_op_read_reg_and_saturate(sreg - DSP_REG_ACM0));
  else
    dsp_op_write_reg(dreg, dsp_op_read_reg(sreg));

  dsp_conditional_extend_accum(dreg);
}

// LRI $D, #I
// 0000 0000 100d dddd
// iiii iiii iiii iiii
// Load immediate value I to register $D.
//
// DSPSpy discovery: This, and possibly other instructions that load a
// register, has a different behaviour in S40 mode if loaded to AC0.M: The
// value gets sign extended to the whole accumulator! This does not happen in
// S16 mode.
void lri(const UDSPInstruction opc)
{
  u8 reg = opc & 0x1F;
  u16 imm = dsp_fetch_code();
  dsp_op_write_reg(reg, imm);
  dsp_conditional_extend_accum(reg);
}

// LRIS $(0x18+D), #I
// 0000 1ddd iiii iiii
// Load immediate value I (8-bit sign extended) to accumulator register.
void lris(const UDSPInstruction opc)
{
  u8 reg = ((opc >> 8) & 0x7) + DSP_REG_AXL0;
  // Cast through s8 performs the 8-bit sign extension before widening.
  u16 imm = (s8)opc;
  dsp_op_write_reg(reg, imm);
  dsp_conditional_extend_accum(reg);
}

//----

// NX
// 1000 -000 xxxx xxxx
// No operation, but can be extended with extended opcode.
// This opcode is supposed to do nothing - it's used if you want to use
// an opcode extension but not do anything. At least according to duddie.
void nx(const UDSPInstruction opc)
{
  ZeroWriteBackLog();
}

//----

// DAR $arD
// 0000 0000 0000 01dd
// Decrement address register $arD.
void dar(const UDSPInstruction opc)
{
  g_dsp.r.ar[opc & 0x3] = dsp_decrement_addr_reg(opc & 0x3);
}

// IAR $arD
// 0000 0000 0000 10dd
// Increment address register $arD.
void iar(const UDSPInstruction opc)
{
  g_dsp.r.ar[opc & 0x3] = dsp_increment_addr_reg(opc & 0x3);
}

// SUBARN $arD
// 0000 0000 0000 11dd
// Subtract indexing register $ixD from an addressing register $arD.
// used only in IPL-NTSC ucode
void subarn(const UDSPInstruction opc)
{
  u8 dreg = opc & 0x3;

  g_dsp.r.ar[dreg] = dsp_decrease_addr_reg(dreg, (s16)g_dsp.r.ix[dreg]);
}

// ADDARN $arD, $ixS
// 0000 0000 0001 ssdd
// Adds indexing register $ixS to an addressing register $arD.
// It is critical for the Zelda ucode that this one wraps correctly.
void addarn(const UDSPInstruction opc)
{
  u8 dreg = opc & 0x3;
  u8 sreg = (opc >> 2) & 0x3;

  g_dsp.r.ar[dreg] = dsp_increase_addr_reg(dreg, (s16)g_dsp.r.ix[sreg]);
}

//----

// SBCLR #I
// 0001 0011 aaaa aiii
// Clear bit of status register $sr. Bit number is calculated by adding 6 to
// immediate value I.
void sbclr(const UDSPInstruction opc)
{
  u8 bit = (opc & 0x7) + 6;

  g_dsp.r.sr &= ~(1 << bit);
}

// SBSET #I
// 0001 0010 aaaa aiii
// Set bit of status register $sr. Bit number is calculated by adding 6 to
// immediate value I.
void sbset(const UDSPInstruction opc)
{
  u8 bit = (opc & 0x7) + 6;

  g_dsp.r.sr |= (1 << bit);
}

// This is a bunch of flag setters, flipping bits in SR.
// Opcodes 0x8a00-0x8f00: each selects one SR mode bit to clear or set.
void srbith(const UDSPInstruction opc)
{
  ZeroWriteBackLog();
  switch ((opc >> 8) & 0xf)
  {
  case 0xa:  // M2
    g_dsp.r.sr &= ~SR_MUL_MODIFY;
    break;
  case 0xb:  // M0
    g_dsp.r.sr |= SR_MUL_MODIFY;
    break;
  case 0xc:  // CLR15
    g_dsp.r.sr &= ~SR_MUL_UNSIGNED;
    break;
  case 0xd:  // SET15
    g_dsp.r.sr |= SR_MUL_UNSIGNED;
    break;
  case 0xe:  // SET16 (CLR40)
    g_dsp.r.sr &= ~SR_40_MODE_BIT;
    break;
  case 0xf:  // SET40
    g_dsp.r.sr |= SR_40_MODE_BIT;
    break;
  default:
    break;
  }
}
}  // namespace DSP::Interpreter
gpl-2.0
epicsdeb/rtems-gdb
gdb/solib.c
5
44503
/* Handle shared libraries for GDB, the GNU Debugger. Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. This file is part of GDB. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "defs.h" #include <sys/types.h> #include <fcntl.h> #include "gdb_string.h" #include "symtab.h" #include "bfd.h" #include "symfile.h" #include "objfiles.h" #include "exceptions.h" #include "gdbcore.h" #include "command.h" #include "target.h" #include "frame.h" #include "gdb_regex.h" #include "inferior.h" #include "environ.h" #include "language.h" #include "gdbcmd.h" #include "completer.h" #include "filenames.h" /* for DOSish file names */ #include "exec.h" #include "solist.h" #include "observer.h" #include "readline/readline.h" #include "remote.h" #include "solib.h" #include "interps.h" #include "filesystem.h" /* Architecture-specific operations. */ /* Per-architecture data key. */ static struct gdbarch_data *solib_data; static void * solib_init (struct obstack *obstack) { struct target_so_ops **ops; ops = OBSTACK_ZALLOC (obstack, struct target_so_ops *); *ops = current_target_so_ops; return ops; } static struct target_so_ops * solib_ops (struct gdbarch *gdbarch) { struct target_so_ops **ops = gdbarch_data (gdbarch, solib_data); return *ops; } /* Set the solib operations for GDBARCH to NEW_OPS. 
*/ void set_solib_ops (struct gdbarch *gdbarch, struct target_so_ops *new_ops) { struct target_so_ops **ops = gdbarch_data (gdbarch, solib_data); *ops = new_ops; } /* external data declarations */ /* FIXME: gdbarch needs to control this variable, or else every configuration needs to call set_solib_ops. */ struct target_so_ops *current_target_so_ops; /* List of known shared objects */ #define so_list_head current_program_space->so_list /* Local function prototypes */ /* If non-empty, this is a search path for loading non-absolute shared library symbol files. This takes precedence over the environment variables PATH and LD_LIBRARY_PATH. */ static char *solib_search_path = NULL; static void show_solib_search_path (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { fprintf_filtered (file, _("The search path for loading non-absolute " "shared library symbol files is %s.\n"), value); } /* Same as HAVE_DOS_BASED_FILE_SYSTEM, but useable as an rvalue. */ #if (HAVE_DOS_BASED_FILE_SYSTEM) # define DOS_BASED_FILE_SYSTEM 1 #else # define DOS_BASED_FILE_SYSTEM 0 #endif /* GLOBAL FUNCTION solib_find -- Find a shared library file. SYNOPSIS char *solib_find (char *in_pathname, int *fd); DESCRIPTION Global variable GDB_SYSROOT is used as a prefix directory to search for shared libraries if they have an absolute path. Global variable SOLIB_SEARCH_PATH is used as a prefix directory (or set of directories, as in LD_LIBRARY_PATH) to search for all shared libraries if not found in GDB_SYSROOT. Search algorithm: * If there is a gdb_sysroot and path is absolute: * Search for gdb_sysroot/path. * else * Look for it literally (unmodified). * Look in SOLIB_SEARCH_PATH. * If available, use target defined search function. * If gdb_sysroot is NOT set, perform the following two searches: * Look in inferior's $PATH. * Look in inferior's $LD_LIBRARY_PATH. 
* * The last check avoids doing this search when targetting remote * machines since gdb_sysroot will almost always be set. RETURNS Full pathname of the shared library file, or NULL if not found. (The pathname is malloc'ed; it needs to be freed by the caller.) *FD is set to either -1 or an open file handle for the library. */ char * solib_find (char *in_pathname, int *fd) { struct target_so_ops *ops = solib_ops (target_gdbarch); int found_file = -1; char *temp_pathname = NULL; int gdb_sysroot_is_empty; const char *solib_symbols_extension = gdbarch_solib_symbols_extension (target_gdbarch); const char *fskind = effective_target_file_system_kind (); struct cleanup *old_chain = make_cleanup (null_cleanup, NULL); char *sysroot = NULL; /* If solib_symbols_extension is set, replace the file's extension. */ if (solib_symbols_extension) { char *p = in_pathname + strlen (in_pathname); while (p > in_pathname && *p != '.') p--; if (*p == '.') { char *new_pathname; new_pathname = alloca (p - in_pathname + 1 + strlen (solib_symbols_extension) + 1); memcpy (new_pathname, in_pathname, p - in_pathname + 1); strcpy (new_pathname + (p - in_pathname) + 1, solib_symbols_extension); in_pathname = new_pathname; } } gdb_sysroot_is_empty = (gdb_sysroot == NULL || *gdb_sysroot == 0); if (!gdb_sysroot_is_empty) { int prefix_len = strlen (gdb_sysroot); /* Remove trailing slashes from absolute prefix. */ while (prefix_len > 0 && IS_DIR_SEPARATOR (gdb_sysroot[prefix_len - 1])) prefix_len--; sysroot = savestring (gdb_sysroot, prefix_len); make_cleanup (xfree, sysroot); } /* If we're on a non-DOS-based system, backslashes won't be understood as directory separator, so, convert them to forward slashes, iff we're supposed to handle DOS-based file system semantics for target paths. */ if (!DOS_BASED_FILE_SYSTEM && fskind == file_system_kind_dos_based) { char *p; /* Avoid clobbering our input. 
*/ p = alloca (strlen (in_pathname) + 1); strcpy (p, in_pathname); in_pathname = p; for (; *p; p++) { if (*p == '\\') *p = '/'; } } /* Note, we're interested in IS_TARGET_ABSOLUTE_PATH, not IS_ABSOLUTE_PATH. The latter is for host paths only, while IN_PATHNAME is a target path. For example, if we're supposed to be handling DOS-like semantics we want to consider a 'c:/foo/bar.dll' path as an absolute path, even on a Unix box. With such a path, before giving up on the sysroot, we'll try: 1st attempt, c:/foo/bar.dll ==> /sysroot/c:/foo/bar.dll 2nd attempt, c:/foo/bar.dll ==> /sysroot/c/foo/bar.dll 3rd attempt, c:/foo/bar.dll ==> /sysroot/foo/bar.dll */ if (!IS_TARGET_ABSOLUTE_PATH (fskind, in_pathname) || gdb_sysroot_is_empty) temp_pathname = xstrdup (in_pathname); else { int need_dir_separator; need_dir_separator = !IS_DIR_SEPARATOR (in_pathname[0]); /* Cat the prefixed pathname together. */ temp_pathname = concat (sysroot, need_dir_separator ? SLASH_STRING : "", in_pathname, (char *) NULL); } /* Handle remote files. */ if (remote_filename_p (temp_pathname)) { *fd = -1; return temp_pathname; } /* Now see if we can open it. */ found_file = open (temp_pathname, O_RDONLY | O_BINARY, 0); if (found_file < 0) xfree (temp_pathname); /* If the search in gdb_sysroot failed, and the path name has a drive spec (e.g, c:/foo), try stripping ':' from the drive spec, and retrying in the sysroot: c:/foo/bar.dll ==> /sysroot/c/foo/bar.dll. */ if (found_file < 0 && !gdb_sysroot_is_empty && HAS_TARGET_DRIVE_SPEC (fskind, in_pathname)) { int need_dir_separator = !IS_DIR_SEPARATOR (in_pathname[2]); char *drive = savestring (in_pathname, 1); temp_pathname = concat (sysroot, SLASH_STRING, drive, need_dir_separator ? 
SLASH_STRING : "", in_pathname + 2, (char *) NULL); xfree (drive); found_file = open (temp_pathname, O_RDONLY | O_BINARY, 0); if (found_file < 0) { xfree (temp_pathname); /* If the search in gdb_sysroot still failed, try fully stripping the drive spec, and trying once more in the sysroot before giving up. c:/foo/bar.dll ==> /sysroot/foo/bar.dll. */ temp_pathname = concat (sysroot, need_dir_separator ? SLASH_STRING : "", in_pathname + 2, (char *) NULL); found_file = open (temp_pathname, O_RDONLY | O_BINARY, 0); if (found_file < 0) xfree (temp_pathname); } } do_cleanups (old_chain); /* We try to find the library in various ways. After each attempt, either found_file >= 0 and temp_pathname is a malloc'd string, or found_file < 0 and temp_pathname does not point to storage that needs to be freed. */ if (found_file < 0) temp_pathname = NULL; /* If not found, search the solib_search_path (if any). */ if (found_file < 0 && solib_search_path != NULL) found_file = openp (solib_search_path, OPF_TRY_CWD_FIRST, in_pathname, O_RDONLY | O_BINARY, &temp_pathname); /* If the search in gdb_sysroot failed, and the path name is absolute at this point, make it relative. (openp will try and open the file according to its absolute path otherwise, which is not what we want.) Affects subsequent searches for this solib. */ if (found_file < 0 && IS_TARGET_ABSOLUTE_PATH (fskind, in_pathname)) { /* First, get rid of any drive letters etc. */ while (!IS_TARGET_DIR_SEPARATOR (fskind, *in_pathname)) in_pathname++; /* Next, get rid of all leading dir separators. */ while (IS_TARGET_DIR_SEPARATOR (fskind, *in_pathname)) in_pathname++; } /* If not found, search the solib_search_path (if any). */ if (found_file < 0 && solib_search_path != NULL) found_file = openp (solib_search_path, OPF_TRY_CWD_FIRST, in_pathname, O_RDONLY | O_BINARY, &temp_pathname); /* If not found, next search the solib_search_path (if any) for the basename only (ignoring the path). 
This is to allow reading solibs from a path that differs from the opened path. */ if (found_file < 0 && solib_search_path != NULL) found_file = openp (solib_search_path, OPF_TRY_CWD_FIRST, target_lbasename (fskind, in_pathname), O_RDONLY | O_BINARY, &temp_pathname); /* If not found, try to use target supplied solib search method. */ if (found_file < 0 && ops->find_and_open_solib) found_file = ops->find_and_open_solib (in_pathname, O_RDONLY | O_BINARY, &temp_pathname); /* If not found, next search the inferior's $PATH environment variable. */ if (found_file < 0 && gdb_sysroot_is_empty) found_file = openp (get_in_environ (current_inferior ()->environment, "PATH"), OPF_TRY_CWD_FIRST, in_pathname, O_RDONLY | O_BINARY, &temp_pathname); /* If not found, next search the inferior's $LD_LIBRARY_PATH environment variable. */ if (found_file < 0 && gdb_sysroot_is_empty) found_file = openp (get_in_environ (current_inferior ()->environment, "LD_LIBRARY_PATH"), OPF_TRY_CWD_FIRST, in_pathname, O_RDONLY | O_BINARY, &temp_pathname); *fd = found_file; return temp_pathname; } /* Open and return a BFD for the shared library PATHNAME. If FD is not -1, it is used as file handle to open the file. Throws an error if the file could not be opened. Handles both local and remote file access. PATHNAME must be malloc'ed by the caller. If successful, the new BFD's name will point to it. If unsuccessful, PATHNAME will be freed and the FD will be closed (unless FD was -1). */ bfd * solib_bfd_fopen (char *pathname, int fd) { bfd *abfd; if (remote_filename_p (pathname)) { gdb_assert (fd == -1); abfd = remote_bfd_open (pathname, gnutarget); } else { abfd = bfd_fopen (pathname, gnutarget, FOPEN_RB, fd); if (abfd) bfd_set_cacheable (abfd, 1); else if (fd != -1) close (fd); } if (!abfd) { make_cleanup (xfree, pathname); error (_("Could not open `%s' as an executable file: %s"), pathname, bfd_errmsg (bfd_get_error ())); } return abfd; } /* Find shared library PATHNAME and open a BFD for it. 
*/ bfd * solib_bfd_open (char *pathname) { char *found_pathname; int found_file; bfd *abfd; const struct bfd_arch_info *b; /* Search for shared library file. */ found_pathname = solib_find (pathname, &found_file); if (found_pathname == NULL) { /* Return failure if the file could not be found, so that we can accumulate messages about missing libraries. */ if (errno == ENOENT) return NULL; perror_with_name (pathname); } /* Open bfd for shared library. */ abfd = solib_bfd_fopen (found_pathname, found_file); /* Check bfd format. */ if (!bfd_check_format (abfd, bfd_object)) { bfd_close (abfd); make_cleanup (xfree, found_pathname); error (_("`%s': not in executable format: %s"), found_pathname, bfd_errmsg (bfd_get_error ())); } /* Check bfd arch. */ b = gdbarch_bfd_arch_info (target_gdbarch); if (!b->compatible (b, bfd_get_arch_info (abfd))) warning (_("`%s': Shared library architecture %s is not compatible " "with target architecture %s."), found_pathname, bfd_get_arch_info (abfd)->printable_name, b->printable_name); return abfd; } /* LOCAL FUNCTION solib_map_sections -- open bfd and build sections for shared lib SYNOPSIS static int solib_map_sections (struct so_list *so) DESCRIPTION Given a pointer to one of the shared objects in our list of mapped objects, use the recorded name to open a bfd descriptor for the object, build a section table, and then relocate all the section addresses by the base address at which the shared object was mapped. FIXMES In most (all?) cases the shared object file name recorded in the dynamic linkage tables will be a fully qualified pathname. For cases where it isn't, do we really mimic the systems search mechanism correctly in the below code (particularly the tilde expansion stuff?). 
*/ static int solib_map_sections (struct so_list *so) { struct target_so_ops *ops = solib_ops (target_gdbarch); char *filename; struct target_section *p; struct cleanup *old_chain; bfd *abfd; filename = tilde_expand (so->so_name); old_chain = make_cleanup (xfree, filename); abfd = ops->bfd_open (filename); do_cleanups (old_chain); if (abfd == NULL) return 0; /* Leave bfd open, core_xfer_memory and "info files" need it. */ so->abfd = gdb_bfd_ref (abfd); /* copy full path name into so_name, so that later symbol_file_add can find it. */ if (strlen (bfd_get_filename (abfd)) >= SO_NAME_MAX_PATH_SIZE) error (_("Shared library file name is too long.")); strcpy (so->so_name, bfd_get_filename (abfd)); if (build_section_table (abfd, &so->sections, &so->sections_end)) { error (_("Can't find the file sections in `%s': %s"), bfd_get_filename (abfd), bfd_errmsg (bfd_get_error ())); } for (p = so->sections; p < so->sections_end; p++) { /* Relocate the section binding addresses as recorded in the shared object's file by the base address to which the object was actually mapped. */ ops->relocate_section_addresses (so, p); /* If the target didn't provide information about the address range of the shared object, assume we want the location of the .text section. */ if (so->addr_low == 0 && so->addr_high == 0 && strcmp (p->the_bfd_section->name, ".text") == 0) { so->addr_low = p->addr; so->addr_high = p->endaddr; } } /* Add the shared object's sections to the current set of file section tables. Do this immediately after mapping the object so that later nodes in the list can query this object, as is needed in solib-osf.c. */ add_target_sections (so->sections, so->sections_end); return 1; } /* Free symbol-file related contents of SO. If we have opened a BFD for SO, close it. If we have placed SO's sections in some target's section table, the caller is responsible for removing them. This function doesn't mess with objfiles at all. 
If there is an objfile associated with SO that needs to be removed, the caller is responsible for taking care of that. */ static void free_so_symbols (struct so_list *so) { if (so->sections) { xfree (so->sections); so->sections = so->sections_end = NULL; } gdb_bfd_unref (so->abfd); so->abfd = NULL; /* Our caller closed the objfile, possibly via objfile_purge_solibs. */ so->symbols_loaded = 0; so->objfile = NULL; so->addr_low = so->addr_high = 0; /* Restore the target-supplied file name. SO_NAME may be the path of the symbol file. */ strcpy (so->so_name, so->so_original_name); } /* LOCAL FUNCTION free_so --- free a `struct so_list' object SYNOPSIS void free_so (struct so_list *so) DESCRIPTION Free the storage associated with the `struct so_list' object SO. If we have opened a BFD for SO, close it. The caller is responsible for removing SO from whatever list it is a member of. If we have placed SO's sections in some target's section table, the caller is responsible for removing them. This function doesn't mess with objfiles at all. If there is an objfile associated with SO that needs to be removed, the caller is responsible for taking care of that. */ void free_so (struct so_list *so) { struct target_so_ops *ops = solib_ops (target_gdbarch); free_so_symbols (so); ops->free_so (so); xfree (so); } /* Return address of first so_list entry in master shared object list. */ struct so_list * master_so_list (void) { return so_list_head; } /* Read in symbols for shared object SO. If SYMFILE_VERBOSE is set in FLAGS, be chatty about it. Return non-zero if any symbols were actually loaded. */ int solib_read_symbols (struct so_list *so, int flags) { const int from_tty = flags & SYMFILE_VERBOSE; if (so->symbols_loaded) { /* If needed, we've already warned in our caller. */ } else if (so->abfd == NULL) { /* We've already warned about this library, when trying to open it. 
*/ } else { volatile struct gdb_exception e; TRY_CATCH (e, RETURN_MASK_ERROR) { struct section_addr_info *sap; /* Have we already loaded this shared object? */ ALL_OBJFILES (so->objfile) { if (filename_cmp (so->objfile->name, so->so_name) == 0 && so->objfile->addr_low == so->addr_low) break; } if (so->objfile != NULL) break; sap = build_section_addr_info_from_section_table (so->sections, so->sections_end); so->objfile = symbol_file_add_from_bfd (so->abfd, flags, sap, OBJF_SHARED); so->objfile->addr_low = so->addr_low; free_section_addr_info (sap); } if (e.reason < 0) exception_fprintf (gdb_stderr, e, _("Error while reading shared" " library symbols for %s:\n"), so->so_name); else { if (from_tty || info_verbose) printf_unfiltered (_("Loaded symbols for %s\n"), so->so_name); so->symbols_loaded = 1; } return 1; } return 0; } /* LOCAL FUNCTION update_solib_list --- synchronize GDB's shared object list with inferior's SYNOPSIS void update_solib_list (int from_tty, struct target_ops *TARGET) Extract the list of currently loaded shared objects from the inferior, and compare it with the list of shared objects currently in GDB's so_list_head list. Edit so_list_head to bring it in sync with the inferior's new list. If we notice that the inferior has unloaded some shared objects, free any symbolic info GDB had read about those shared objects. Don't load symbolic info for any new shared objects; just add them to the list, and leave their symbols_loaded flag clear. If FROM_TTY is non-null, feel free to print messages about what we're doing. If TARGET is non-null, add the sections of all new shared objects to TARGET's section table. Note that this doesn't remove any sections for shared objects that have been unloaded, and it doesn't check to see if the new shared objects are already present in the section table. But we only use this for core files and processes we've just attached to, so that's okay. 
*/ static void update_solib_list (int from_tty, struct target_ops *target) { struct target_so_ops *ops = solib_ops (target_gdbarch); struct so_list *inferior = ops->current_sos(); struct so_list *gdb, **gdb_link; /* We can reach here due to changing solib-search-path or the sysroot, before having any inferior. */ if (target_has_execution && !ptid_equal (inferior_ptid, null_ptid)) { struct inferior *inf = current_inferior (); /* If we are attaching to a running process for which we have not opened a symbol file, we may be able to get its symbols now! */ if (inf->attach_flag && symfile_objfile == NULL) catch_errors (ops->open_symbol_file_object, &from_tty, "Error reading attached process's symbol file.\n", RETURN_MASK_ALL); } /* GDB and the inferior's dynamic linker each maintain their own list of currently loaded shared objects; we want to bring the former in sync with the latter. Scan both lists, seeing which shared objects appear where. There are three cases: - A shared object appears on both lists. This means that GDB knows about it already, and it's still loaded in the inferior. Nothing needs to happen. - A shared object appears only on GDB's list. This means that the inferior has unloaded it. We should remove the shared object from GDB's tables. - A shared object appears only on the inferior's list. This means that it's just been loaded. We should add it to GDB's tables. So we walk GDB's list, checking each entry to see if it appears in the inferior's list too. If it does, no action is needed, and we remove it from the inferior's list. If it doesn't, the inferior has unloaded it, and we remove it from GDB's list. By the time we're done walking GDB's list, the inferior's list contains only the new shared objects, which we then add. */ gdb = so_list_head; gdb_link = &so_list_head; while (gdb) { struct so_list *i = inferior; struct so_list **i_link = &inferior; /* Check to see whether the shared object *gdb also appears in the inferior's current list. 
*/ while (i) { if (ops->same) { if (ops->same (gdb, i)) break; } else { if (! filename_cmp (gdb->so_original_name, i->so_original_name)) break; } i_link = &i->next; i = *i_link; } /* If the shared object appears on the inferior's list too, then it's still loaded, so we don't need to do anything. Delete it from the inferior's list, and leave it on GDB's list. */ if (i) { *i_link = i->next; free_so (i); gdb_link = &gdb->next; gdb = *gdb_link; } /* If it's not on the inferior's list, remove it from GDB's tables. */ else { /* Notify any observer that the shared object has been unloaded before we remove it from GDB's tables. */ observer_notify_solib_unloaded (gdb); *gdb_link = gdb->next; /* Unless the user loaded it explicitly, free SO's objfile. */ if (gdb->objfile && ! (gdb->objfile->flags & OBJF_USERLOADED)) free_objfile (gdb->objfile); /* Some targets' section tables might be referring to sections from so->abfd; remove them. */ remove_target_sections (gdb->abfd); free_so (gdb); gdb = *gdb_link; } } /* Now the inferior's list contains only shared objects that don't appear in GDB's list --- those that are newly loaded. Add them to GDB's shared object list. */ if (inferior) { int not_found = 0; const char *not_found_filename = NULL; struct so_list *i; /* Add the new shared objects to GDB's list. */ *gdb_link = inferior; /* Fill in the rest of each of the `struct so_list' nodes. */ for (i = inferior; i; i = i->next) { volatile struct gdb_exception e; i->pspace = current_program_space; TRY_CATCH (e, RETURN_MASK_ERROR) { /* Fill in the rest of the `struct so_list' node. */ if (!solib_map_sections (i)) { not_found++; if (not_found_filename == NULL) not_found_filename = i->so_original_name; } } if (e.reason < 0) exception_fprintf (gdb_stderr, e, _("Error while mapping shared " "library sections:\n")); /* Notify any observer that the shared object has been loaded now that we've added it to GDB's tables. 
*/ observer_notify_solib_loaded (i); } /* If a library was not found, issue an appropriate warning message. We have to use a single call to warning in case the front end does something special with warnings, e.g., pop up a dialog box. It Would Be Nice if we could get a "warning: " prefix on each line in the CLI front end, though - it doesn't stand out well. */ if (not_found == 1) warning (_("Could not load shared library symbols for %s.\n" "Do you need \"set solib-search-path\" " "or \"set sysroot\"?"), not_found_filename); else if (not_found > 1) warning (_("\ Could not load shared library symbols for %d libraries, e.g. %s.\n\ Use the \"info sharedlibrary\" command to see the complete listing.\n\ Do you need \"set solib-search-path\" or \"set sysroot\"?"), not_found, not_found_filename); } } /* Return non-zero if NAME is the libpthread shared library. Uses a fairly simplistic heuristic approach where we check the file name against "/libpthread". This can lead to false positives, but this should be good enough in practice. */ int libpthread_name_p (const char *name) { return (strstr (name, "/libpthread") != NULL); } /* Return non-zero if SO is the libpthread shared library. */ static int libpthread_solib_p (struct so_list *so) { return libpthread_name_p (so->so_name); } /* GLOBAL FUNCTION solib_add -- read in symbol info for newly added shared libraries SYNOPSIS void solib_add (char *pattern, int from_tty, struct target_ops *TARGET, int readsyms) DESCRIPTION Read in symbolic information for any shared objects whose names match PATTERN. (If we've already read a shared object's symbol info, leave it alone.) If PATTERN is zero, read them all. If READSYMS is 0, defer reading symbolic information until later but still do any needed low level processing. FROM_TTY and TARGET are as described for update_solib_list, above. 
*/ void solib_add (char *pattern, int from_tty, struct target_ops *target, int readsyms) { struct so_list *gdb; if (pattern) { char *re_err = re_comp (pattern); if (re_err) error (_("Invalid regexp: %s"), re_err); } update_solib_list (from_tty, target); /* Walk the list of currently loaded shared libraries, and read symbols for any that match the pattern --- or any whose symbols aren't already loaded, if no pattern was given. */ { int any_matches = 0; int loaded_any_symbols = 0; const int flags = SYMFILE_DEFER_BP_RESET | (from_tty ? SYMFILE_VERBOSE : 0); for (gdb = so_list_head; gdb; gdb = gdb->next) if (! pattern || re_exec (gdb->so_name)) { /* Normally, we would read the symbols from that library only if READSYMS is set. However, we're making a small exception for the pthread library, because we sometimes need the library symbols to be loaded in order to provide thread support (x86-linux for instance). */ const int add_this_solib = (readsyms || libpthread_solib_p (gdb)); any_matches = 1; if (add_this_solib) { if (gdb->symbols_loaded) { /* If no pattern was given, be quiet for shared libraries we have already loaded. */ if (pattern && (from_tty || info_verbose)) printf_unfiltered (_("Symbols already loaded for %s\n"), gdb->so_name); } else if (solib_read_symbols (gdb, flags)) loaded_any_symbols = 1; } } if (loaded_any_symbols) breakpoint_re_set (); if (from_tty && pattern && ! any_matches) printf_unfiltered ("No loaded shared libraries match the pattern `%s'.\n", pattern); if (loaded_any_symbols) { struct target_so_ops *ops = solib_ops (target_gdbarch); /* Getting new symbols may change our opinion about what is frameless. */ reinit_frame_cache (); ops->special_symbol_handling (); } } } /* LOCAL FUNCTION info_sharedlibrary_command -- code for "info sharedlibrary" SYNOPSIS static void info_sharedlibrary_command () DESCRIPTION Walk through the shared library list and print information about each attached library matching PATTERN. 
If PATTERN is elided, print them all. */ static void info_sharedlibrary_command (char *pattern, int from_tty) { struct so_list *so = NULL; /* link map state variable */ int so_missing_debug_info = 0; int addr_width; int nr_libs; struct cleanup *table_cleanup; struct gdbarch *gdbarch = target_gdbarch; if (pattern) { char *re_err = re_comp (pattern); if (re_err) error (_("Invalid regexp: %s"), re_err); } /* "0x", a little whitespace, and two hex digits per byte of pointers. */ addr_width = 4 + (gdbarch_ptr_bit (gdbarch) / 4); update_solib_list (from_tty, 0); /* make_cleanup_ui_out_table_begin_end needs to know the number of rows, so we need to make two passes over the libs. */ for (nr_libs = 0, so = so_list_head; so; so = so->next) { if (so->so_name[0]) { if (pattern && ! re_exec (so->so_name)) continue; ++nr_libs; } } table_cleanup = make_cleanup_ui_out_table_begin_end (uiout, 4, nr_libs, "SharedLibraryTable"); /* The "- 1" is because ui_out adds one space between columns. */ ui_out_table_header (uiout, addr_width - 1, ui_left, "from", "From"); ui_out_table_header (uiout, addr_width - 1, ui_left, "to", "To"); ui_out_table_header (uiout, 12 - 1, ui_left, "syms-read", "Syms Read"); ui_out_table_header (uiout, 0, ui_noalign, "name", "Shared Object Library"); ui_out_table_body (uiout); for (so = so_list_head; so; so = so->next) { struct cleanup *lib_cleanup; if (! so->so_name[0]) continue; if (pattern && ! re_exec (so->so_name)) continue; lib_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout, "lib"); if (so->addr_high != 0) { ui_out_field_core_addr (uiout, "from", gdbarch, so->addr_low); ui_out_field_core_addr (uiout, "to", gdbarch, so->addr_high); } else { ui_out_field_skip (uiout, "from"); ui_out_field_skip (uiout, "to"); } if (! 
ui_out_is_mi_like_p (interp_ui_out (top_level_interpreter ())) && so->symbols_loaded && !objfile_has_symbols (so->objfile)) { so_missing_debug_info = 1; ui_out_field_string (uiout, "syms-read", "Yes (*)"); } else ui_out_field_string (uiout, "syms-read", so->symbols_loaded ? "Yes" : "No"); ui_out_field_string (uiout, "name", so->so_name); ui_out_text (uiout, "\n"); do_cleanups (lib_cleanup); } do_cleanups (table_cleanup); if (nr_libs == 0) { if (pattern) ui_out_message (uiout, 0, _("No shared libraries matched.\n")); else ui_out_message (uiout, 0, _("No shared libraries loaded at this time.\n")); } else { if (so_missing_debug_info) ui_out_message (uiout, 0, _("(*): Shared library is missing " "debugging information.\n")); } } /* Return 1 if ADDRESS lies within SOLIB. */ int solib_contains_address_p (const struct so_list *const solib, CORE_ADDR address) { struct target_section *p; for (p = solib->sections; p < solib->sections_end; p++) if (p->addr <= address && address < p->endaddr) return 1; return 0; } /* GLOBAL FUNCTION solib_name_from_address -- if an address is in a shared lib, return its name. SYNOPSIS char * solib_name_from_address (CORE_ADDR address) DESCRIPTION Provides a hook for other gdb routines to discover whether or not a particular address is within the mapped address space of a shared library. For example, this routine is called at one point to disable breakpoints which are in shared libraries that are not currently mapped in. */ char * solib_name_from_address (struct program_space *pspace, CORE_ADDR address) { struct so_list *so = NULL; for (so = pspace->so_list; so; so = so->next) if (solib_contains_address_p (so, address)) return (so->so_name); return (0); } /* Return whether the data starting at VADDR, size SIZE, must be kept in a core file for shared libraries loaded before "gcore" is used to be handled correctly when the core file is loaded. 
This only applies when the section would otherwise not be kept in the core file (in particular, for readonly sections). */ int solib_keep_data_in_core (CORE_ADDR vaddr, unsigned long size) { struct target_so_ops *ops = solib_ops (target_gdbarch); if (ops->keep_data_in_core) return ops->keep_data_in_core (vaddr, size); else return 0; } /* Called by free_all_symtabs */ void clear_solib (void) { struct target_so_ops *ops = solib_ops (target_gdbarch); /* This function is expected to handle ELF shared libraries. It is also used on Solaris, which can run either ELF or a.out binaries (for compatibility with SunOS 4), both of which can use shared libraries. So we don't know whether we have an ELF executable or an a.out executable until the user chooses an executable file. ELF shared libraries don't get mapped into the address space until after the program starts, so we'd better not try to insert breakpoints in them immediately. We have to wait until the dynamic linker has loaded them; we'll hit a bp_shlib_event breakpoint (look for calls to create_solib_event_breakpoint) when it's ready. SunOS shared libraries seem to be different --- they're present as soon as the process begins execution, so there's no need to put off inserting breakpoints. There's also nowhere to put a bp_shlib_event breakpoint, so if we put it off, we'll never get around to it. So: disable breakpoints only if we're using ELF shared libs. 
*/ if (exec_bfd != NULL && bfd_get_flavour (exec_bfd) != bfd_target_aout_flavour) disable_breakpoints_in_shlibs (); while (so_list_head) { struct so_list *so = so_list_head; so_list_head = so->next; observer_notify_solib_unloaded (so); if (so->abfd) remove_target_sections (so->abfd); free_so (so); } ops->clear_solib (); } /* GLOBAL FUNCTION solib_create_inferior_hook -- shared library startup support SYNOPSIS void solib_create_inferior_hook (int from_tty) DESCRIPTION When gdb starts up the inferior, it nurses it along (through the shell) until it is ready to execute it's first instruction. At this point, this function gets called via expansion of the macro SOLIB_CREATE_INFERIOR_HOOK. */ void solib_create_inferior_hook (int from_tty) { struct target_so_ops *ops = solib_ops (target_gdbarch); ops->solib_create_inferior_hook (from_tty); } /* GLOBAL FUNCTION in_solib_dynsym_resolve_code -- check to see if an address is in dynamic loader's dynamic symbol resolution code SYNOPSIS int in_solib_dynsym_resolve_code (CORE_ADDR pc) DESCRIPTION Determine if PC is in the dynamic linker's symbol resolution code. Return 1 if so, 0 otherwise. */ int in_solib_dynsym_resolve_code (CORE_ADDR pc) { struct target_so_ops *ops = solib_ops (target_gdbarch); return ops->in_dynsym_resolve_code (pc); } /* LOCAL FUNCTION sharedlibrary_command -- handle command to explicitly add library SYNOPSIS static void sharedlibrary_command (char *args, int from_tty) DESCRIPTION */ static void sharedlibrary_command (char *args, int from_tty) { dont_repeat (); solib_add (args, from_tty, (struct target_ops *) 0, 1); } /* LOCAL FUNCTION no_shared_libraries -- handle command to explicitly discard symbols from shared libraries. DESCRIPTION Implements the command "nosharedlibrary", which discards symbols that have been auto-loaded from shared libraries. Symbols from shared libraries that were added by explicit request of the user are not discarded. Also called from remote.c. 
*/ void no_shared_libraries (char *ignored, int from_tty) { /* The order of the two routines below is important: clear_solib notifies the solib_unloaded observers, and some of these observers might need access to their associated objfiles. Therefore, we can not purge the solibs' objfiles before clear_solib has been called. */ clear_solib (); objfile_purge_solibs (); } /* Reload shared libraries, but avoid reloading the same symbol file we already have loaded. */ static void reload_shared_libraries_1 (int from_tty) { struct so_list *so; struct cleanup *old_chain = make_cleanup (null_cleanup, NULL); for (so = so_list_head; so != NULL; so = so->next) { char *filename, *found_pathname = NULL; bfd *abfd; int was_loaded = so->symbols_loaded; const int flags = SYMFILE_DEFER_BP_RESET | (from_tty ? SYMFILE_VERBOSE : 0); filename = tilde_expand (so->so_original_name); make_cleanup (xfree, filename); abfd = solib_bfd_open (filename); if (abfd != NULL) { found_pathname = xstrdup (bfd_get_filename (abfd)); make_cleanup (xfree, found_pathname); gdb_bfd_close_or_warn (abfd); } /* If this shared library is no longer associated with its previous symbol file, close that. */ if ((found_pathname == NULL && was_loaded) || (found_pathname != NULL && filename_cmp (found_pathname, so->so_name) != 0)) { if (so->objfile && ! (so->objfile->flags & OBJF_USERLOADED)) free_objfile (so->objfile); remove_target_sections (so->abfd); free_so_symbols (so); } /* If this shared library is now associated with a new symbol file, open it. 
*/ if (found_pathname != NULL && (!was_loaded || filename_cmp (found_pathname, so->so_name) != 0)) { volatile struct gdb_exception e; TRY_CATCH (e, RETURN_MASK_ERROR) solib_map_sections (so); if (e.reason < 0) exception_fprintf (gdb_stderr, e, _("Error while mapping " "shared library sections:\n")); else if (auto_solib_add || was_loaded || libpthread_solib_p (so)) solib_read_symbols (so, flags); } } do_cleanups (old_chain); } static void reload_shared_libraries (char *ignored, int from_tty, struct cmd_list_element *e) { struct target_so_ops *ops; reload_shared_libraries_1 (from_tty); ops = solib_ops (target_gdbarch); /* Creating inferior hooks here has two purposes. First, if we reload shared libraries then the address of solib breakpoint we've computed previously might be no longer valid. For example, if we forgot to set solib-absolute-prefix and are setting it right now, then the previous breakpoint address is plain wrong. Second, installing solib hooks also implicitly figures were ld.so is and loads symbols for it. Absent this call, if we've just connected to a target and set solib-absolute-prefix or solib-search-path, we'll lose all information about ld.so. */ if (target_has_execution) { /* Reset or free private data structures not associated with so_list entries. */ ops->clear_solib (); /* Remove any previous solib event breakpoint. This is usually done in common code, at breakpoint_init_inferior time, but we're not really starting up the inferior here. */ remove_solib_event_breakpoints (); #ifdef SOLIB_CREATE_INFERIOR_HOOK SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid)); #else solib_create_inferior_hook (from_tty); #endif } /* Sometimes the platform-specific hook loads initial shared libraries, and sometimes it doesn't. If it doesn't FROM_TTY will be incorrectly 0 but such solib targets should be fixed anyway. If we made all the inferior hook methods consistent, this call could be removed. 
Call it only after the solib target has been initialized by solib_create_inferior_hook. */ solib_add (NULL, 0, NULL, auto_solib_add); breakpoint_re_set (); /* We may have loaded or unloaded debug info for some (or all) shared libraries. However, frames may still reference them. For example, a frame's unwinder might still point at DWARF FDE structures that are now freed. Also, getting new symbols may change our opinion about what is frameless. */ reinit_frame_cache (); ops->special_symbol_handling (); } static void show_auto_solib_add (struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value) { fprintf_filtered (file, _("Autoloading of shared library symbols is %s.\n"), value); } /* Handler for library-specific lookup of global symbol NAME in OBJFILE. Call the library-specific handler if it is installed for the current target. */ struct symbol * solib_global_lookup (const struct objfile *objfile, const char *name, const domain_enum domain) { struct target_so_ops *ops = solib_ops (target_gdbarch); if (ops->lookup_lib_global_symbol != NULL) return ops->lookup_lib_global_symbol (objfile, name, domain); return NULL; } extern initialize_file_ftype _initialize_solib; /* -Wmissing-prototypes */ void _initialize_solib (void) { solib_data = gdbarch_data_register_pre_init (solib_init); add_com ("sharedlibrary", class_files, sharedlibrary_command, _("Load shared object library symbols for files matching REGEXP.")); add_info ("sharedlibrary", info_sharedlibrary_command, _("Status of loaded shared object libraries.")); add_com ("nosharedlibrary", class_files, no_shared_libraries, _("Unload all shared object library symbols.")); add_setshow_boolean_cmd ("auto-solib-add", class_support, &auto_solib_add, _("\ Set autoloading of shared library symbols."), _("\ Show autoloading of shared library symbols."), _("\ If \"on\", symbols from all shared object libraries will be loaded\n\ automatically when the inferior begins execution, when the dynamic linker\n\ 
informs gdb that a new library has been loaded, or when attaching to the\n\ inferior. Otherwise, symbols must be loaded manually, using \ `sharedlibrary'."), NULL, show_auto_solib_add, &setlist, &showlist); add_setshow_filename_cmd ("sysroot", class_support, &gdb_sysroot, _("\ Set an alternate system root."), _("\ Show the current system root."), _("\ The system root is used to load absolute shared library symbol files.\n\ For other (relative) files, you can add directories using\n\ `set solib-search-path'."), reload_shared_libraries, NULL, &setlist, &showlist); add_alias_cmd ("solib-absolute-prefix", "sysroot", class_support, 0, &setlist); add_alias_cmd ("solib-absolute-prefix", "sysroot", class_support, 0, &showlist); add_setshow_optional_filename_cmd ("solib-search-path", class_support, &solib_search_path, _("\ Set the search path for loading non-absolute shared library symbol files."), _("\ Show the search path for loading non-absolute shared library symbol files."), _("\ This takes precedence over the environment variables \ PATH and LD_LIBRARY_PATH."), reload_shared_libraries, show_solib_search_path, &setlist, &showlist); }
gpl-2.0
csolanol/kernel-morrison
arch/arm/boot/compressed/misc.c
5
3380
/*
 * misc.c
 *
 * This is a collection of several routines from gzip-1.0.3
 * adapted for Linux.
 *
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 *
 * Modified for ARM Linux by Russell King
 *
 * Nicolas Pitre <nico@visuaide.com>  1999/04/14 :
 *  For this code to run directly from Flash, all constant variables must
 *  be marked with 'const' and all other variables initialized at run-time
 *  only.  This way all non constant variables will end up in the bss segment,
 *  which should point to addresses in RAM and cleared to 0 on start.
 *  This allows for a much quicker boot time.
 */

/* Machine type handed to us by the boot loader; forwarded to the kernel. */
unsigned int __machine_arch_type;

/* Prevent <linux/string.h> from declaring the string functions; this file
 * provides its own freestanding memcpy below. */
#define _LINUX_STRING_H_

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/string.h>
#include <asm/unaligned.h>

static void putstr(const char *ptr);
extern void error(char *x);

/* Platform-specific putc()/flush()/arch_decomp_setup() come from here. */
#include <mach/uncompress.h>

#ifdef CONFIG_DEBUG_ICEDCC

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V7)

/*
 * Emit one character over the ICE Debug Communications Channel (v6/v7
 * encoding).  Spins until the DCC write buffer drains (bit 29 of the
 * DBGDSCR status register), bailing out after a bounded number of polls
 * so a disconnected debugger cannot hang the boot.
 */
static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;

	do {
		if (--i < 0)
			return;

		asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status));
	} while (status & (1 << 29));

	asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
}

#else

/*
 * Same as above for pre-v6 cores: status is read from c0,c0 and the
 * "write buffer full" flag is bit 1; data goes out through c1,c0.
 */
static void icedcc_putc(int ch)
{
	int status, i = 0x4000000;

	do {
		if (--i < 0)
			return;

		asm volatile ("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
	} while (status & 2);

	asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch));
}

#endif

#define putc(ch)	icedcc_putc(ch)
#define flush()	do { } while (0)
#endif

/*
 * Write a NUL-terminated string through the board's putc(), expanding
 * '\n' to "\r\n" for raw serial consoles, then flush.
 */
static void putstr(const char *ptr)
{
	char c;

	while ((c = *ptr++) != '\0') {
		if (c == '\n')
			putc('\r');
		putc(c);
	}

	flush();
}

/*
 * Freestanding memcpy (no libc here): copies 8 bytes per iteration,
 * then handles the remaining 4/2/1 bytes via the low bits of __n.
 * Byte-wise only, so alignment of the buffers does not matter.
 */
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
	int i = 0;
	unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;

	for (i = __n >> 3; i > 0; i--) {
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1 << 2) {
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1 << 1) {
		*d++ = *s++;
		*d++ = *s++;
	}

	if (__n & 1)
		*d++ = *s++;

	return __dest;
}

/*
 * gzip declarations
 */

/* Compressed kernel image, bracketed by linker-provided symbols. */
extern char input_data[];
extern char input_data_end[];

unsigned char *output_data;	/* where the uncompressed kernel goes */
unsigned long output_ptr;	/* uncompressed size (see decompress_kernel) */

/* Scratch heap bounds for the decompressor's allocator. */
unsigned long free_mem_ptr;
unsigned long free_mem_end_ptr;

#ifndef arch_error
#define arch_error(x)
#endif

/*
 * Fatal-error path: give the platform a chance to react, print the
 * message, and halt.  Never returns.
 */
void error(char *x)
{
	arch_error(x);

	putstr("\n\n");
	putstr(x);
	putstr("\n\n -- System halted");

	while(1);	/* Halt */
}

/* Division-by-zero trap helper referenced by compiler-generated code. */
asmlinkage void __div0(void)
{
	error("Attempting division by 0!");
}

extern void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));

/*
 * Entry point called from the assembly boot stub.
 *
 * output_start       - physical address to decompress the kernel to
 * free_mem_ptr_p/..end_p - scratch heap for the decompressor
 * arch_id            - machine type from the boot loader
 *
 * Returns the uncompressed image size, which is stored little-endian in
 * the last 4 bytes appended to the compressed payload.
 */
unsigned long
decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
		unsigned long free_mem_ptr_end_p, int arch_id)
{
	unsigned char *tmp;

	output_data		= (unsigned char *)output_start;
	free_mem_ptr		= free_mem_ptr_p;
	free_mem_end_ptr	= free_mem_ptr_end_p;
	__machine_arch_type	= arch_id;

	arch_decomp_setup();

	/* The 32-bit uncompressed length is appended after the payload. */
	tmp = (unsigned char *) (((unsigned long)input_data_end) - 4);
	output_ptr = get_unaligned_le32(tmp);

	putstr("Uncompressing Linux...");
	do_decompress(input_data, input_data_end - input_data,
			output_data, error);
	putstr(" done, booting the kernel.\n");
	return output_ptr;
}
gpl-2.0
cwabbott0/samsung-chromebook-kernel
security/chromiumos/lsm.c
5
5600
/*
 * Linux Security Module for Chromium OS
 *
 * Copyright 2011 Google Inc. All Rights Reserved
 *
 * Authors:
 *      Stephan Uphoff  <ups@google.com>
 *      Kees Cook       <keescook@chromium.org>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "Chromium OS LSM: " fmt

#include <linux/module.h>
#include <linux/security.h>
#include <linux/sched.h>	/* current and other task related stuff */
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/root_dev.h>

#include "utils.h"

/*
 * sb_mount hook: reject any mount whose target path was reached through
 * a symlink.  current->total_link_count is nonzero when the preceding
 * path walk traversed at least one symlink; log the offender's pid and
 * a printable copy of its command line on denial.
 */
static int chromiumos_security_sb_mount(const char *dev_name,
					struct path *path,
					const char *type, unsigned long flags,
					void *data)
{
	int error = current->total_link_count ? -ELOOP : 0;

	if (error) {
		char *cmdline;

		cmdline = printable_cmdline(current);
		pr_notice("Mount path with symlinks prohibited - "
			"pid=%d cmdline=%s\n",
			task_pid_nr(current), cmdline);
		kfree(cmdline);
	}

	return error;
}

/*
 * Log one module-load decision.  "path" may be NULL (old init_module API
 * with no file); "operation" is a short verdict string such as "locked"
 * or "denied".  Best-effort: allocation or d_path failures degrade the
 * pathname to a placeholder rather than suppressing the log line.
 *
 * NOTE(review): the kfree(alloced); alloced = pathname; sequence assumes
 * printable() returns a freshly kmalloc'd buffer rather than a pointer
 * into its argument — confirm against utils.h, otherwise this is a
 * use-after-free.
 */
static void report_load_module(struct path *path, char *operation)
{
	char *alloced = NULL, *cmdline;
	char *pathname; /* Pointer to either static string or "alloced". */

	if (!path)
		pathname = "<unknown>";
	else {
		/* We will allow 11 spaces for ' (deleted)' to be appended */
		alloced = pathname = kmalloc(PATH_MAX+11, GFP_KERNEL);
		if (!pathname)
			pathname = "<no_memory>";
		else {
			/* d_path writes backwards from the end of the buffer
			 * and returns a pointer inside it. */
			pathname = d_path(path, pathname, PATH_MAX+11);
			if (IS_ERR(pathname))
				pathname = "<too_long>";
			else {
				pathname = printable(pathname);
				kfree(alloced);
				alloced = pathname;
			}
		}
	}

	cmdline = printable_cmdline(current);
	pr_notice("init_module %s module=%s pid=%d cmdline=%s\n",
		operation, pathname, task_pid_nr(current), cmdline);
	kfree(cmdline);
	kfree(alloced);

}

/* 1 = enforce module-location locking (default); may become writable
 * via sysctl when the root device is not read-only. */
static int module_locking = 1;
/* Mount root of the first module ever loaded; all later modules must
 * come from the same mount.  Written once under locked_root_spinlock. */
static struct dentry *locked_root;
static DEFINE_SPINLOCK(locked_root_spinlock);

#ifdef CONFIG_SYSCTL
static int zero;
static int one = 1;

/* Exposed as /proc/sys/kernel/chromiumos/module_locking. */
static struct ctl_path chromiumos_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "chromiumos", },
	{ }
};

static struct ctl_table chromiumos_sysctl_table[] = {
	{
		.procname       = "module_locking",
		.data           = &module_locking,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec_minmax,
		.extra1         = &zero,
		.extra2         = &one,
	},
	{ }
};

/* Check if the root device is read-only (e.g. dm-verity is enabled).
 * This must be called after early kernel init, since then the rootdev
 * is available.
 */
static bool rootdev_readonly(void)
{
	bool rc;
	struct block_device *bdev;
	const fmode_t mode = FMODE_WRITE;

	bdev = blkdev_get_by_dev(ROOT_DEV, mode, NULL);
	if (IS_ERR(bdev)) {
		/* In this weird case, assume it is read-only. */
		pr_info("dev(%u,%u): FMODE_WRITE disallowed?!\n",
			MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
		return true;
	}

	rc = bdev_read_only(bdev);
	blkdev_put(bdev, mode);
	pr_info("dev(%u,%u): %s\n", MAJOR(ROOT_DEV), MINOR(ROOT_DEV),
		rc ? "read-only" : "writable");

	return rc;
}

/* If module locking is not being enforced, allow sysctl to change
 * modes for testing.
 */
static void check_locking_enforcement(void)
{
	/* Writable root (no dm-verity) means a dev image: register the
	 * sysctl so testers can flip module_locking off. */
	if (!rootdev_readonly()) {
		if (!register_sysctl_paths(chromiumos_sysctl_path,
					   chromiumos_sysctl_table))
			pr_notice("sysctl registration failed!\n");
		else
			pr_info("module locking can be disabled.\n");
	} else
		pr_info("module locking engaged.\n");
}
#else
static void check_locking_enforcement(void) { }
#endif

/*
 * kernel_module_from_file hook: permit a module load only when it comes
 * from the same mount as the very first module loaded (the "locked
 * root").  A NULL file means the old fd-less init_module API, which is
 * denied outright unless module_locking has been disabled.  Returns 0
 * to allow, -EPERM to deny.
 */
static int chromiumos_security_load_module(struct file *file)
{
	struct dentry *module_root;

	if (!file) {
		if (!module_locking) {
			report_load_module(NULL, "old-api-locking-ignored");
			return 0;
		}

		report_load_module(NULL, "old-api-denied");
		return -EPERM;
	}

	module_root = file->f_vfsmnt->mnt_root;

	/* First loaded module defines the root for all others. */
	spin_lock(&locked_root_spinlock);
	if (!locked_root) {
		locked_root = dget(module_root);
		/*
		 * Unlock now since it's only locked_root we care about.
		 * In the worst case, we will (correctly) report locking
		 * failures before we have announced that locking is
		 * enabled. This would be purely cosmetic.
		 */
		spin_unlock(&locked_root_spinlock);
		report_load_module(&file->f_path, "locked");
		check_locking_enforcement();
	} else {
		spin_unlock(&locked_root_spinlock);
	}

	if (module_root != locked_root) {
		if (unlikely(!module_locking)) {
			report_load_module(&file->f_path, "locking-ignored");
			return 0;
		}

		report_load_module(&file->f_path, "denied");
		return -EPERM;
	}

	return 0;
}

static struct security_operations chromiumos_security_ops = {
	.name	= "chromiumos",
	.sb_mount = chromiumos_security_sb_mount,
	.kernel_module_from_file = chromiumos_security_load_module,
};

/* Register this LSM at boot; failure to register is fatal. */
static int __init chromiumos_security_init(void)
{
	int error;

	error = register_security(&chromiumos_security_ops);

	if (error)
		panic("Could not register Chromium OS security module");

	return error;
}
security_initcall(chromiumos_security_init);

module_param(module_locking, int, S_IRUSR);
MODULE_PARM_DESC(module_locking, "Module loading restrictions (default: true)");
gpl-2.0
Winless/NPLRuntime
Client/trunk/ParaEngineClient/Core/AttributeField.cpp
5
2980
//-----------------------------------------------------------------------------
// Class:	AttributeField
// Authors:	Li,Xizhi
// Emails:	LiXizhi@yeah.net
// Date:	2006.5.9, refactored 2015.9.2
// Notes:
//-----------------------------------------------------------------------------
#include "ParaEngine.h"
#include "AttributeField.h"

/**@def max length of a generated simple-schema string (":int{..}" etc.) */
#define MAX_SIMPLE_SCHEMA_STRING_LENGTH		256

using namespace ParaEngine;

// shared hasher used to precompute the field-name hash in SetFieldname()
std::hash<string> CAttributeField::HashFunc;

CAttributeField::CAttributeField()
	:m_type(FieldType_Deprecated), m_hash((size_t)-1)
{
	m_offsetSetFunc.ptr_fun = NULL;
	m_offsetGetFunc.ptr_fun = NULL;
}

CAttributeField::~CAttributeField()
{
}

// Human-readable name of this field's value type (delegates to CVariable).
const char* CAttributeField::GetTypeAsString()
{
	return CVariable::GetTypeAsString(m_type);
}

// Table of known "simple schema" prefixes; indexed by the SIMPLE_SCHEMA enum,
// so the order here must match that enum.
#define SIMPLE_SCHEMATYPE_COUNT	6
const char g_schemaType[][20] = {
	":rgb", ":file", ":script", ":int", ":float", ":dialog",
};

// Store the field name and cache its hash for fast lookups.
void CAttributeField::SetFieldname(const string& sFieldname)
{
	m_sFieldname = sFieldname;
	m_hash = HashFunc(sFieldname);
}

const string& CAttributeField::GetFieldname() const
{
	return m_sFieldname;
}

size_t CAttributeField::GetHash() const
{
	return m_hash;
}

const char* CAttributeField::GetSimpleSchema(SIMPLE_SCHEMA schema)
{
	return g_schemaType[schema];
}

// Scratch buffer shared by GetSimpleSchemaOfInt/Float below.
// NOTE(review): static buffer => returned pointer is overwritten by the next
// call and this is not thread-safe; callers must copy the result promptly.
static char tmp[MAX_SIMPLE_SCHEMA_STRING_LENGTH + 1];

// Build an ":int{min,max}" schema string in the shared scratch buffer.
const char* CAttributeField::GetSimpleSchemaOfInt(int nMin, int nMax)
{
	memset(tmp, 0, sizeof(tmp));
	snprintf(tmp, MAX_SIMPLE_SCHEMA_STRING_LENGTH, ":int{%d,%d}", nMin, nMax);
	return tmp;
}

// Build a ":float{min,max}" schema string in the shared scratch buffer.
const char* CAttributeField::GetSimpleSchemaOfFloat(float fMin, float fMax)
{
	memset(tmp, 0, sizeof(tmp));
	snprintf(tmp, MAX_SIMPLE_SCHEMA_STRING_LENGTH, ":float{%f,%f}", fMin, fMax);
	return tmp;
}

// Return the canonical simple-schema prefix (":int", ":float", ...) that
// m_sSchematics starts with, or the empty string when the schematics is
// empty, advanced (not ':'-prefixed), or matches no known prefix.
const char* CAttributeField::GetSchematicsType()
{
	if (m_sSchematics.empty())
		return CGlobals::GetString(G_STR_EMPTY).c_str();
	else if (m_sSchematics[0] == ':')
	{
		// this is a simple schema, so let us perform further parse
		int nSize = (int)m_sSchematics.size();
		for (int i = 0; i < SIMPLE_SCHEMATYPE_COUNT; ++i)
		{
			char c = 0;
			bool bFound = true;
			// Prefix comparison: walk the candidate until its NUL,
			// bounded by the schematics length.  E.g. ":int{1,5}"
			// matches the ":int" entry.
			for (int j = 1; j < nSize && (c = g_schemaType[i][j]) != '\0'; ++j)
			{
				if (c != m_sSchematics[j]){
					bFound = false; break;
				}
			}
			if (bFound)
			{
				return g_schemaType[i];
			}
		}
	}
	else
	{
		// TODO: parse more advanced schema here.
	}
	return CGlobals::GetString(G_STR_EMPTY).c_str();
}

// Parse "{min,max}" out of m_sSchematics into fMin/fMax.  On failure the
// outputs are set to the defaults (-99999999, 99999999) and false is
// returned.
// NOTE(review): nFrom/nTo are ints compared against string::npos; the
// (int)npos == -1 value converts back to npos in the comparison, so this
// works, but size_t locals would be cleaner.
bool CAttributeField::GetSchematicsMinMax(float& fMin, float& fMax)
{
	float min = -99999999.f;
	float max = 99999999.f;
	int nFrom = (int)m_sSchematics.find_first_of('{');
	int nTo = (int)m_sSchematics.find_last_of('}');
	bool res = false;
	if (nFrom != string::npos && nTo != string::npos && nTo > nFrom)
	{
		string str = m_sSchematics.substr(nFrom, nTo - nFrom + 1);
#ifdef WIN32
		res = _snscanf(str.c_str(), (int)str.size(), "{%f,%f}", &min, &max) >= 2;
#else
		res = sscanf(str.c_str(), "{%f,%f}", &min, &max) >= 2;
#endif
	}
	fMin = min;
	fMax = max;
	return res;
}
gpl-2.0
plenarius/nwnx2-linux
api/CExoEncapsulatedFile.cpp
5
2133
#include "CExoEncapsulatedFile.h" void CExoEncapsulatedFile::AddAsyncRefCount() { asm("leave"); asm("mov $0x082c4ac0, %eax"); asm("jmp *%eax"); } void CExoEncapsulatedFile::AddRefCount() { asm("leave"); asm("mov $0x082c4a90, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::CloseAsyncFile() { asm("leave"); asm("mov $0x082c4b30, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::CloseFile() { asm("leave"); asm("mov $0x082c4af0, %eax"); asm("jmp *%eax"); } void CExoEncapsulatedFile::DeleteAsyncRefCount() { asm("leave"); asm("mov $0x082c4c04, %eax"); asm("jmp *%eax"); } void CExoEncapsulatedFile::DeleteRefCount() { asm("leave"); asm("mov $0x082c4bd4, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::GetDescription() { asm("leave"); asm("mov $0x082c5124, %eax"); asm("jmp *%eax"); } unsigned long CExoEncapsulatedFile::GetResourceSize(unsigned long) { asm("leave"); asm("mov $0x082c4c34, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::Initialize() { asm("leave"); asm("mov $0x082c4c4c, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::LoadHeader(unsigned char) { asm("leave"); asm("mov $0x082c3688, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::OpenAsyncFile() { asm("leave"); asm("mov $0x082c334c, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::OpenFile() { asm("leave"); asm("mov $0x082c3010, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::OpenFile(unsigned char *) { asm("leave"); asm("mov $0x082c4ca4, %eax"); asm("jmp *%eax"); } void CExoEncapsulatedFile::ReadResourceAsync(unsigned long, void *, unsigned long, unsigned long) { asm("leave"); asm("mov $0x082c4d3c, %eax"); asm("jmp *%eax"); } unsigned long CExoEncapsulatedFile::ReadResource(unsigned long, void *, unsigned long, unsigned long) { asm("leave"); asm("mov $0x082c4cbc, %eax"); asm("jmp *%eax"); } int CExoEncapsulatedFile::UnloadHeader() { asm("leave"); asm("mov $0x082c4b70, %eax"); asm("jmp *%eax"); }
gpl-2.0
luaman/qforge-2
irix/snd_irix.c
5
6144
/*
Copyright (C) 1997-2001 Id Software, Inc.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.
*/

#include <dmedia/dmedia.h>
#include <dmedia/audio.h>
#include "../client/client.h"
#include "../client/snd_loc.h"

/*
==================
SNDDM_Init

Try to find a sound device to mix for.
Returns false if nothing is found.
Returns true and fills in the "dma" structure with information for the mixer.
==================
*/

// must be power of two!
#define QSND_SKID		2
#define QSND_BUFFER_FRAMES	8192
#define QSND_BUFFER_SIZE	(QSND_BUFFER_FRAMES*2)
/* Map an unadjusted system time (frame count) to a sample offset inside
   the recirculating stereo buffer (x2 for two shorts per frame). */
#define UST_TO_BUFFPOS(ust)	((int)((ust) & (QSND_BUFFER_FRAMES - 1)) << 1)

cvar_t *s_loadas8bit;
cvar_t *s_khz;
cvar_t *sndchannels;

/* The mixing buffer handed to the game's mixer; frames are interleaved
   stereo 16-bit samples. */
short int dma_buffer[QSND_BUFFER_SIZE];
ALport sgisnd_aport = NULL;

/* Audio-clock bookkeeping: first frame of the current buffer window, the
   device rate in frames per nanosecond, and the last frame we wrote. */
long long sgisnd_startframe;
double sgisnd_frames_per_ns;
long long sgisnd_lastframewritten = 0;

/*
 * Open an SGI Audio Library port, configure rate/width/channels from the
 * s_khz / s_loadas8bit / sndchannels cvars, and fill in the global "dma"
 * description for the mixer.  Only 16-bit stereo output is actually
 * supported; other requests are logged and coerced.
 */
qboolean SNDDMA_Init(void)
{
    ALconfig ac = NULL;
    ALpv     pvbuf[2];

    /* NOTE: default "16" is truthy, so the first branch picks 8-bit and
       the check below immediately forces it back to 16-bit; this quirk is
       inherited from the original cvar semantics. */
    s_loadas8bit = Cvar_Get("s_loadas8bit", "16", CVAR_ARCHIVE);
    if ((int)s_loadas8bit->value)
	dma.samplebits = 8;
    else
	dma.samplebits = 16;

    if (dma.samplebits != 16)
    {
	Com_Printf("Don't currently support %i-bit data. Forcing 16-bit.\n",
		   dma.samplebits);
	dma.samplebits = 16;
	Cvar_SetValue( "s_loadas8bit", false );
    }

    s_khz = Cvar_Get("s_khz", "0", CVAR_ARCHIVE);
    switch ((int)s_khz->value)
    {
    case 48: dma.speed = AL_RATE_48000; break;
    case 44: dma.speed = AL_RATE_44100; break;
    case 32: dma.speed = AL_RATE_32000; break;
    case 22: dma.speed = AL_RATE_22050; break;
    case 16: dma.speed = AL_RATE_16000; break;
    case 11: dma.speed = AL_RATE_11025; break;
    case  8: dma.speed = AL_RATE_8000;  break;
    default:
	dma.speed = AL_RATE_22050;
	Com_Printf("Don't currently support %i kHz sample rate.  Using %i.\n",
		   (int)s_khz->value, (int)(dma.speed/1000));
    }

    sndchannels = Cvar_Get("sndchannels", "2", CVAR_ARCHIVE);
    dma.channels = (int)sndchannels->value;
    if (dma.channels != 2)
	/* BUGFIX: the original passed the cvar_t pointer "sndchannels" to
	   %i (undefined behavior / garbage output); pass the int value. */
	Com_Printf("Don't currently support %i sound channels. Try 2.\n",
		   dma.channels);

    /***********************/

    ac = alNewConfig();
    alSetChannels( ac, AL_STEREO );
    alSetSampFmt( ac, AL_SAMPFMT_TWOSCOMP );
    alSetQueueSize( ac, QSND_BUFFER_FRAMES );
    if (dma.samplebits == 8)
	alSetWidth( ac, AL_SAMPLE_8 );
    else
	alSetWidth( ac, AL_SAMPLE_16 );

    sgisnd_aport = alOpenPort( "Quake", "w", ac );
    if (!sgisnd_aport)
    {
	printf( "failed to open audio port!\n" );
    }

    /* set desired sample rate on the port's device */
    pvbuf[0].param = AL_MASTER_CLOCK;
    pvbuf[0].value.i = AL_CRYSTAL_MCLK_TYPE;
    pvbuf[1].param = AL_RATE;
    pvbuf[1].value.ll = alIntToFixed( dma.speed );
    alSetParams( alGetResource( sgisnd_aport ), pvbuf, 2 );
    if (pvbuf[1].sizeOut < 0)
	printf( "illegal sample rate %d\n", dma.speed );

    sgisnd_frames_per_ns = dma.speed * 1.0e-9;

    dma.samples = sizeof(dma_buffer)/(dma.samplebits/8);
    dma.submission_chunk = 1;

    dma.buffer = (unsigned char *)dma_buffer;

    dma.samplepos = 0;

    alFreeConfig( ac );

    return true;
}

/*
==============
SNDDMA_GetDMAPos

return the current sample position (in mono samples, not stereo)
inside the recirculating dma buffer, so the mixing code
will know how many sample are required to fill it up.
===============
*/
int SNDDMA_GetDMAPos(void)
{
    long long ustFuture, ustNow;

    if (!sgisnd_aport) return( 0 );

    /* Translate the port's frame clock into "frames played right now":
       alGetFrameTime gives a (frame, UST) pair slightly in the future;
       back it off by the UST delta scaled to frames. */
    alGetFrameTime( sgisnd_aport, &sgisnd_startframe, &ustFuture );
    dmGetUST( (unsigned long long *)&ustNow );
    sgisnd_startframe -= (long long)((ustFuture - ustNow)
				     * sgisnd_frames_per_ns);
    /* small safety lead so we never write at the exact play position */
    sgisnd_startframe += 100;

    //printf( "frame %ld pos %d\n", frame, UST_TO_BUFFPOS( sgisnd_startframe ) );
    return( UST_TO_BUFFPOS( sgisnd_startframe ) );
}

/*
==============
SNDDMA_Shutdown

Reset the sound device for exiting
===============
*/
void SNDDMA_Shutdown(void)
{
    if (sgisnd_aport) alClosePort( sgisnd_aport ), sgisnd_aport = NULL;
    return;
}

/*
==============
SNDDMA_Submit

Send sound to device if buffer isn't really the dma buffer
===============
*/
extern int soundtime;

void SNDDMA_Submit(void)
{
    /* NOTE(review): nFilled and endtime are computed/declared but unused. */
    int nFillable, nFilled, nPos;
    int nFrames, nFramesLeft;
    unsigned endtime;

    if (!sgisnd_aport) return;

    nFillable = alGetFillable( sgisnd_aport );
    nFilled = QSND_BUFFER_FRAMES - nFillable;

    /* frames the mixer has produced (samples per channel) */
    nFrames = dma.samples >> (dma.channels - 1);

    if (paintedtime - soundtime < nFrames)
	nFrames = paintedtime - soundtime;

    if (nFrames <= QSND_SKID) return;

    nPos = UST_TO_BUFFPOS( sgisnd_startframe );

    // dump re-written contents of the buffer
    if (sgisnd_lastframewritten > sgisnd_startframe)
    {
	alDiscardFrames( sgisnd_aport,
			 sgisnd_lastframewritten - sgisnd_startframe );
    }
    else if ((int)(sgisnd_startframe - sgisnd_lastframewritten)
	     >= QSND_BUFFER_FRAMES)
    {
	// blow away everything if we've underflowed
	alDiscardFrames( sgisnd_aport, QSND_BUFFER_FRAMES );
    }

    // don't block
    if (nFrames > nFillable) nFrames = nFillable;

    // account for stereo
    nFramesLeft = nFrames;
    /* wrap-around: write the tail of the ring first, then the head */
    if (nPos + nFrames * dma.channels > QSND_BUFFER_SIZE)
    {
	int nFramesAtEnd = (QSND_BUFFER_SIZE - nPos) >> (dma.channels - 1);

	alWriteFrames( sgisnd_aport, &dma_buffer[nPos], nFramesAtEnd );
	nPos = 0;
	nFramesLeft -= nFramesAtEnd;
    }
    alWriteFrames( sgisnd_aport, &dma_buffer[nPos], nFramesLeft );

    sgisnd_lastframewritten = sgisnd_startframe + nFrames;
}

void SNDDMA_BeginPainting (void)
{
}
gpl-2.0
jpoirier/linux
fs/cifs/misc.c
5
18628
/* * fs/cifs/misc.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/ctype.h> #include <linux/mempool.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "smberr.h" #include "nterr.h" #include "cifs_unicode.h" #ifdef CONFIG_CIFS_SMB2 #include "smb2pdu.h" #endif extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; /* The xid serves as a useful identifier for each incoming vfs request, in a similar way to the mid which is useful to track each sent smb, and CurrentXid can also provide a running counter (although it will eventually wrap past zero) of the total vfs operations handled since the cifs fs was mounted */ unsigned int _get_xid(void) { unsigned int xid; spin_lock(&GlobalMid_Lock); GlobalTotalActiveXid++; /* keep high water mark for number of simultaneous ops in filesystem */ if (GlobalTotalActiveXid > GlobalMaxActiveXid) GlobalMaxActiveXid = GlobalTotalActiveXid; if (GlobalTotalActiveXid > 65000) cifs_dbg(FYI, "warning: more than 65000 requests active\n"); xid = GlobalCurrentXid++; spin_unlock(&GlobalMid_Lock); return xid; } void _free_xid(unsigned int xid) { 
spin_lock(&GlobalMid_Lock); /* if (GlobalTotalActiveXid == 0) BUG(); */ GlobalTotalActiveXid--; spin_unlock(&GlobalMid_Lock); } struct cifs_ses * sesInfoAlloc(void) { struct cifs_ses *ret_buf; ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); if (ret_buf) { atomic_inc(&sesInfoAllocCount); ret_buf->status = CifsNew; ++ret_buf->ses_count; INIT_LIST_HEAD(&ret_buf->smb_ses_list); INIT_LIST_HEAD(&ret_buf->tcon_list); mutex_init(&ret_buf->session_mutex); } return ret_buf; } void sesInfoFree(struct cifs_ses *buf_to_free) { if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n"); return; } atomic_dec(&sesInfoAllocCount); kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); if (buf_to_free->password) { memset(buf_to_free->password, 0, strlen(buf_to_free->password)); kfree(buf_to_free->password); } kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); kfree(buf_to_free->auth_key.response); kfree(buf_to_free); } struct cifs_tcon * tconInfoAlloc(void) { struct cifs_tcon *ret_buf; ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL); if (ret_buf) { atomic_inc(&tconInfoAllocCount); ret_buf->tidStatus = CifsNew; ++ret_buf->tc_count; INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); #ifdef CONFIG_CIFS_STATS spin_lock_init(&ret_buf->stat_lock); #endif } return ret_buf; } void tconInfoFree(struct cifs_tcon *buf_to_free) { if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); return; } atomic_dec(&tconInfoAllocCount); kfree(buf_to_free->nativeFileSystem); if (buf_to_free->password) { memset(buf_to_free->password, 0, strlen(buf_to_free->password)); kfree(buf_to_free->password); } kfree(buf_to_free); } struct smb_hdr * cifs_buf_get(void) { struct smb_hdr *ret_buf = NULL; size_t buf_size = sizeof(struct smb_hdr); #ifdef CONFIG_CIFS_SMB2 /* * SMB2 header is bigger than CIFS one - no problems to clean some * more bytes for CIFS. 
*/ buf_size = sizeof(struct smb2_hdr); #endif /* * We could use negotiated size instead of max_msgsize - * but it may be more efficient to always alloc same size * albeit slightly larger than necessary and maxbuffersize * defaults to this and can not be bigger. */ ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS); /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ if (ret_buf) { memset(ret_buf, 0, buf_size + 3); atomic_inc(&bufAllocCount); #ifdef CONFIG_CIFS_STATS2 atomic_inc(&totBufAllocCount); #endif /* CONFIG_CIFS_STATS2 */ } return ret_buf; } void cifs_buf_release(void *buf_to_free) { if (buf_to_free == NULL) { /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/ return; } mempool_free(buf_to_free, cifs_req_poolp); atomic_dec(&bufAllocCount); return; } struct smb_hdr * cifs_small_buf_get(void) { struct smb_hdr *ret_buf = NULL; /* We could use negotiated size instead of max_msgsize - but it may be more efficient to always alloc same size albeit slightly larger than necessary and maxbuffersize defaults to this and can not be bigger */ ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); if (ret_buf) { /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ atomic_inc(&smBufAllocCount); #ifdef CONFIG_CIFS_STATS2 atomic_inc(&totSmBufAllocCount); #endif /* CONFIG_CIFS_STATS2 */ } return ret_buf; } void cifs_small_buf_release(void *buf_to_free) { if (buf_to_free == NULL) { cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n"); return; } mempool_free(buf_to_free, cifs_sm_req_poolp); atomic_dec(&smBufAllocCount); return; } /* NB: MID can not be set if treeCon not passed in, in that case it is responsbility of caller to set the mid */ void header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , const struct cifs_tcon *treeCon, int word_count /* length of fixed section (word count) in two byte units */) { char *temp = (char *) 
buffer; memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ buffer->smb_buf_length = cpu_to_be32( (2 * word_count) + sizeof(struct smb_hdr) - 4 /* RFC 1001 length field does not count */ + 2 /* for bcc field itself */) ; buffer->Protocol[0] = 0xFF; buffer->Protocol[1] = 'S'; buffer->Protocol[2] = 'M'; buffer->Protocol[3] = 'B'; buffer->Command = smb_command; buffer->Flags = 0x00; /* case sensitive */ buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES; buffer->Pid = cpu_to_le16((__u16)current->tgid); buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16)); if (treeCon) { buffer->Tid = treeCon->tid; if (treeCon->ses) { if (treeCon->ses->capabilities & CAP_UNICODE) buffer->Flags2 |= SMBFLG2_UNICODE; if (treeCon->ses->capabilities & CAP_STATUS32) buffer->Flags2 |= SMBFLG2_ERR_STATUS; /* Uid is not converted */ buffer->Uid = treeCon->ses->Suid; buffer->Mid = get_next_mid(treeCon->ses->server); } if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) buffer->Flags2 |= SMBFLG2_DFS; if (treeCon->nocase) buffer->Flags |= SMBFLG_CASELESS; if ((treeCon->ses) && (treeCon->ses->server)) if (treeCon->ses->server->sign) buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; } /* endian conversion of flags is now done just before sending */ buffer->WordCount = (char) word_count; return; } static int check_smb_hdr(struct smb_hdr *smb) { /* does it have the right SMB "signature" ? */ if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) { cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n", *(unsigned int *)smb->Protocol); return 1; } /* if it's a response then accept */ if (smb->Flags & SMBFLG_RESPONSE) return 0; /* only one valid case where server sends us request */ if (smb->Command == SMB_COM_LOCKING_ANDX) return 0; cifs_dbg(VFS, "Server sent request, not response. 
mid=%u\n", get_mid(smb)); return 1; } int checkSMB(char *buf, unsigned int total_read) { struct smb_hdr *smb = (struct smb_hdr *)buf; __u32 rfclen = be32_to_cpu(smb->smb_buf_length); __u32 clc_len; /* calculated length */ cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n", total_read, rfclen); /* is this frame too small to even get to a BCC? */ if (total_read < 2 + sizeof(struct smb_hdr)) { if ((total_read >= sizeof(struct smb_hdr) - 1) && (smb->Status.CifsError != 0)) { /* it's an error return */ smb->WordCount = 0; /* some error cases do not return wct and bcc */ return 0; } else if ((total_read == sizeof(struct smb_hdr) + 1) && (smb->WordCount == 0)) { char *tmp = (char *)smb; /* Need to work around a bug in two servers here */ /* First, check if the part of bcc they sent was zero */ if (tmp[sizeof(struct smb_hdr)] == 0) { /* some servers return only half of bcc * on simple responses (wct, bcc both zero) * in particular have seen this on * ulogoffX and FindClose. This leaves * one byte of bcc potentially unitialized */ /* zero rest of bcc */ tmp[sizeof(struct smb_hdr)+1] = 0; return 0; } cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n"); } else { cifs_dbg(VFS, "Length less than smb header size\n"); } return -EIO; } /* otherwise, there is enough to get to the BCC */ if (check_smb_hdr(smb)) return -EIO; clc_len = smbCalcSize(smb); if (4 + rfclen != total_read) { cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n", rfclen); return -EIO; } if (4 + rfclen != clc_len) { __u16 mid = get_mid(smb); /* check if bcc wrapped around for large read responses */ if ((rfclen > 64 * 1024) && (rfclen > clc_len)) { /* check if lengths match mod 64K */ if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF)) return 0; /* bcc wrapped */ } cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n", clc_len, 4 + rfclen, mid); if (4 + rfclen < clc_len) { cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n", rfclen, mid); return -EIO; } else if (rfclen 
> clc_len + 512) { /* * Some servers (Windows XP in particular) send more * data than the lengths in the SMB packet would * indicate on certain calls (byte range locks and * trans2 find first calls in particular). While the * client can handle such a frame by ignoring the * trailing data, we choose limit the amount of extra * data to 512 bytes. */ cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n", rfclen, mid); return -EIO; } } return 0; } bool is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; struct list_head *tmp, *tmp1, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *pCifsInode; struct cifsFileInfo *netfile; cifs_dbg(FYI, "Checking for oplock break or dnotify response\n"); if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && (pSMB->hdr.Flags & SMBFLG_RESPONSE)) { struct smb_com_transaction_change_notify_rsp *pSMBr = (struct smb_com_transaction_change_notify_rsp *)buf; struct file_notify_information *pnotify; __u32 data_offset = 0; if (get_bcc(buf) > sizeof(struct file_notify_information)) { data_offset = le32_to_cpu(pSMBr->DataOffset); pnotify = (struct file_notify_information *) ((char *)&pSMBr->hdr.Protocol + data_offset); cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n", pnotify->FileName, pnotify->Action); /* cifs_dump_mem("Rcvd notify Data: ",buf, sizeof(struct smb_hdr)+60); */ return true; } if (pSMBr->hdr.Status.CifsError) { cifs_dbg(FYI, "notify err 0x%d\n", pSMBr->hdr.Status.CifsError); return true; } return false; } if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX) return false; if (pSMB->hdr.Flags & SMBFLG_RESPONSE) { /* no sense logging error on invalid handle on oplock break - harmless race between close request and oplock break response is expected from time to time writing out large dirty files cached on the client */ if ((NT_STATUS_INVALID_HANDLE) == 
le32_to_cpu(pSMB->hdr.Status.CifsError)) { cifs_dbg(FYI, "invalid handle on oplock break\n"); return true; } else if (ERRbadfid == le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { return true; } else { return false; /* on valid oplock brk we get "request" */ } } if (pSMB->hdr.WordCount != 8) return false; cifs_dbg(FYI, "oplock type 0x%d level 0x%d\n", pSMB->LockType, pSMB->OplockLevel); if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) return false; /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &srv->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); list_for_each(tmp1, &ses->tcon_list) { tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); if (tcon->tid != buf->Tid) continue; cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&cifs_file_list_lock); list_for_each(tmp2, &tcon->openFileList) { netfile = list_entry(tmp2, struct cifsFileInfo, tlist); if (pSMB->Fid != netfile->fid.netfid) continue; cifs_dbg(FYI, "file id match, oplock break\n"); pCifsInode = CIFS_I(netfile->dentry->d_inode); set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); /* * Set flag if the server downgrades the oplock * to L2 else clear. 
*/ if (pSMB->OplockLevel) set_bit( CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &pCifsInode->flags); else clear_bit( CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &pCifsInode->flags); queue_work(cifsiod_wq, &netfile->oplock_break); netfile->oplock_break_cancelled = false; spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "No matching file for oplock break\n"); return true; } } spin_unlock(&cifs_tcp_ses_lock); cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); return true; } void dump_smb(void *buf, int smb_buf_length) { int i, j; char debug_line[17]; unsigned char *buffer = buf; if (traceSMB == 0) return; for (i = 0, j = 0; i < smb_buf_length; i++, j++) { if (i % 8 == 0) { /* have reached the beginning of line */ printk(KERN_DEBUG "| "); j = 0; } printk("%0#4x ", buffer[i]); debug_line[2 * j] = ' '; if (isprint(buffer[i])) debug_line[1 + (2 * j)] = buffer[i]; else debug_line[1 + (2 * j)] = '_'; if (i % 8 == 7) { /* reached end of line, time to print ascii */ debug_line[16] = 0; printk(" | %s\n", debug_line); } } for (; j < 8; j++) { printk(" "); debug_line[2 * j] = ' '; debug_line[1 + (2 * j)] = ' '; } printk(" | %s\n", debug_line); return; } void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. 
Consider mounting with the \"noserverino\" option to silence this message.\n", cifs_sb_master_tcon(cifs_sb)->treeName); } } void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) { oplock &= 0xF; if (oplock == OPLOCK_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", &cinode->vfs_inode); } else if (oplock == OPLOCK_READ) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", &cinode->vfs_inode); } else cinode->oplock = 0; } static int cifs_oplock_break_wait(void *unused) { schedule(); return signal_pending(current) ? -ERESTARTSYS : 0; } /* * We wait for oplock breaks to be processed before we attempt to perform * writes. */ int cifs_get_writer(struct cifsInodeInfo *cinode) { int rc; start: rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK, TASK_KILLABLE); if (rc) return rc; spin_lock(&cinode->writers_lock); if (!cinode->writers) set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); cinode->writers++; /* Check to see if we have started servicing an oplock break */ if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) { cinode->writers--; if (cinode->writers == 0) { clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); } spin_unlock(&cinode->writers_lock); goto start; } spin_unlock(&cinode->writers_lock); return 0; } void cifs_put_writer(struct cifsInodeInfo *cinode) { spin_lock(&cinode->writers_lock); cinode->writers--; if (cinode->writers == 0) { clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS); } spin_unlock(&cinode->writers_lock); } void cifs_done_oplock_break(struct cifsInodeInfo *cinode) { clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK); } bool backup_cred(struct cifs_sb_info *cifs_sb) { if (cifs_sb->mnt_cifs_flags & 
CIFS_MOUNT_CIFS_BACKUPUID) { if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid())) return true; } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) { if (in_group_p(cifs_sb->mnt_backupgid)) return true; } return false; } void cifs_del_pending_open(struct cifs_pending_open *open) { spin_lock(&cifs_file_list_lock); list_del(&open->olist); spin_unlock(&cifs_file_list_lock); } void cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open) { #ifdef CONFIG_CIFS_SMB2 memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE); #endif open->oplock = CIFS_OPLOCK_NO_CHANGE; open->tlink = tlink; fid->pending_open = open; list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens); } void cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, struct cifs_pending_open *open) { spin_lock(&cifs_file_list_lock); cifs_add_pending_open_locked(fid, tlink, open); spin_unlock(&cifs_file_list_lock); }
gpl-2.0
Picture-Elements/linux-2.4-peijse
net/ipv4/netfilter/ipt_recent.c
5
33780
/* Kernel module to check if the source address has been seen recently. */ /* Copyright 2002-2003, Stephen Frost */ /* Author: Stephen Frost <sfrost@snowman.net> */ /* Project Page: http://snowman.net/projects/ipt_recent/ */ /* This software is distributed under the terms of the GPL, Version 2 */ /* This copyright does not cover user programs that use kernel services * by normal system calls. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <linux/ctype.h> #include <linux/ip.h> #include <linux/vmalloc.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv4/ipt_recent.h> #undef DEBUG #define HASH_LOG 9 /* Defaults, these can be overridden on the module command-line. */ static int ip_list_tot = 100; static int ip_pkt_list_tot = 20; static int ip_list_hash_size = 0; static int ip_list_perms = 0644; #ifdef DEBUG static int debug = 1; #endif static char version[] = KERN_INFO RECENT_NAME " " RECENT_VER ": Stephen Frost <sfrost@snowman.net>. http://snowman.net/projects/ipt_recent/\n"; MODULE_AUTHOR("Stephen Frost <sfrost@snowman.net>"); MODULE_DESCRIPTION("IP tables recently seen matching module " RECENT_VER); MODULE_LICENSE("GPL"); MODULE_PARM(ip_list_tot,"i"); MODULE_PARM(ip_pkt_list_tot,"i"); MODULE_PARM(ip_list_hash_size,"i"); MODULE_PARM(ip_list_perms,"i"); #ifdef DEBUG MODULE_PARM(debug,"i"); MODULE_PARM_DESC(debug,"debugging level, defaults to 1"); #endif MODULE_PARM_DESC(ip_list_tot,"number of IPs to remember per list"); MODULE_PARM_DESC(ip_pkt_list_tot,"number of packets per IP to remember"); MODULE_PARM_DESC(ip_list_hash_size,"size of hash table used to look up IPs"); MODULE_PARM_DESC(ip_list_perms,"permissions on /proc/net/ipt_recent/* files"); /* Structure of our list of recently seen addresses. 
*/ struct recent_ip_list { u_int32_t addr; u_int8_t ttl; u_int32_t last_seen; u_int32_t *last_pkts; u_int32_t oldest_pkt; u_int32_t hash_entry; u_int32_t time_pos; }; struct time_info_list { u_int32_t position; u_int32_t time; }; /* Structure of our linked list of tables of recent lists. */ struct recent_ip_tables { char name[IPT_RECENT_NAME_LEN]; int count; int time_pos; struct recent_ip_list *table; struct recent_ip_tables *next; spinlock_t list_lock; int *hash_table; struct time_info_list *time_info; #ifdef CONFIG_PROC_FS struct proc_dir_entry *status_proc; #endif /* CONFIG_PROC_FS */ }; /* Our current list of addresses we have recently seen. * Only added to on a --set, and only updated on --set || --update */ static struct recent_ip_tables *r_tables = NULL; /* We protect r_list with this spinlock so two processors are not modifying * the list at the same time. */ static spinlock_t recent_lock = SPIN_LOCK_UNLOCKED; #ifdef CONFIG_PROC_FS /* Our /proc/net/ipt_recent entry */ static struct proc_dir_entry *proc_net_ipt_recent = NULL; #endif /* Function declaration for later. */ static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *matchinfo, int offset, const void *hdr, u_int16_t datalen, int *hotdrop); /* Function to hash a given address into the hash table of table_size size */ static int hash_func(unsigned int addr, int table_size) { int result = 0; unsigned int value = addr; do { result ^= value; } while((value >>= HASH_LOG)); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": %d = hash_func(%u,%d)\n", result & (table_size - 1), addr, table_size); #endif return(result & (table_size - 1)); } #ifdef CONFIG_PROC_FS /* This is the function which produces the output for our /proc output * interface which lists each IP address, the last seen time and the * other recent times the address was seen. 
*/ static int ip_recent_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { int len = 0, count, last_len = 0, pkt_count; off_t pos = 0; off_t begin = 0; struct recent_ip_tables *curr_table; curr_table = (struct recent_ip_tables*) data; spin_lock_bh(&curr_table->list_lock); for(count = 0; count < ip_list_tot; count++) { if(!curr_table->table[count].addr) continue; last_len = len; len += sprintf(buffer+len,"src=%u.%u.%u.%u ",NIPQUAD(curr_table->table[count].addr)); len += sprintf(buffer+len,"ttl: %u ",curr_table->table[count].ttl); len += sprintf(buffer+len,"last_seen: %u ",curr_table->table[count].last_seen); len += sprintf(buffer+len,"oldest_pkt: %u ",curr_table->table[count].oldest_pkt); len += sprintf(buffer+len,"last_pkts: %u",curr_table->table[count].last_pkts[0]); for(pkt_count = 1; pkt_count < ip_pkt_list_tot; pkt_count++) { if(!curr_table->table[count].last_pkts[pkt_count]) break; len += sprintf(buffer+len,", %u",curr_table->table[count].last_pkts[pkt_count]); } len += sprintf(buffer+len,"\n"); pos = begin + len; if(pos < offset) { len = 0; begin = pos; } if(pos > offset + length) { len = last_len; break; } } *start = buffer + (offset - begin); len -= (offset - begin); if(len > length) len = length; spin_unlock_bh(&curr_table->list_lock); return len; } /* ip_recent_ctrl provides an interface for users to modify the table * directly. This allows adding entries, removing entries, and * flushing the entire table. 
* This is done by opening up the appropriate table for writing and * sending one of: * xx.xx.xx.xx -- Add entry to table with current time * +xx.xx.xx.xx -- Add entry to table with current time * -xx.xx.xx.xx -- Remove entry from table * clear -- Flush table, remove all entries */ static int ip_recent_ctrl(struct file *file, const char *input, unsigned long size, void *data) { static const u_int32_t max[4] = { 0xffffffff, 0xffffff, 0xffff, 0xff }; u_int32_t val; int base, used = 0; char c, *cp; union iaddr { uint8_t bytes[4]; uint32_t word; } res; uint8_t *pp = res.bytes; int digit; char buffer[20]; int len, check_set = 0, count; u_int32_t addr = 0; struct sk_buff *skb; struct ipt_recent_info *info; struct recent_ip_tables *curr_table; curr_table = (struct recent_ip_tables*) data; if(size > 20) len = 20; else len = size; if(copy_from_user(buffer,input,len)) return -EFAULT; if(len < 20) buffer[len] = '\0'; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": ip_recent_ctrl len: %d, input: `%.20s'\n",len,buffer); #endif cp = buffer; while(isspace(*cp)) { cp++; used++; if(used >= len-5) return used; } /* Check if we are asked to flush the entire table */ if(!memcmp(cp,"clear",5)) { used += 5; spin_lock_bh(&curr_table->list_lock); curr_table->time_pos = 0; for(count = 0; count < ip_list_hash_size; count++) { curr_table->hash_table[count] = -1; } for(count = 0; count < ip_list_tot; count++) { curr_table->table[count].last_seen = 0; curr_table->table[count].addr = 0; curr_table->table[count].ttl = 0; memset(curr_table->table[count].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); curr_table->table[count].oldest_pkt = 0; curr_table->table[count].time_pos = 0; curr_table->time_info[count].position = count; curr_table->time_info[count].time = 0; } spin_unlock_bh(&curr_table->list_lock); return used; } check_set = IPT_RECENT_SET; switch(*cp) { case '+': check_set = IPT_RECENT_SET; cp++; used++; break; case '-': check_set = IPT_RECENT_REMOVE; cp++; used++; break; default: 
if(!isdigit(*cp)) return (used+1); break; } #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": ip_recent_ctrl cp: `%c', check_set: %d\n",*cp,check_set); #endif /* Get addr (effectively inet_aton()) */ /* Shamelessly stolen from libc, a function in the kernel for doing * this would, of course, be greatly preferred, but our options appear * to be rather limited, so we will just do it ourselves here. */ res.word = 0; c = *cp; for(;;) { if(!isdigit(c)) return used; val = 0; base = 10; digit = 0; if(c == '0') { c = *++cp; if(c == 'x' || c == 'X') base = 16, c = *++cp; else { base = 8; digit = 1; } } for(;;) { if(isascii(c) && isdigit(c)) { if(base == 8 && (c == '8' || c == '0')) return used; val = (val * base) + (c - '0'); c = *++cp; digit = 1; } else if(base == 16 && isascii(c) && isxdigit(c)) { val = (val << 4) | (c + 10 - (islower(c) ? 'a' : 'A')); c = *++cp; digit = 1; } else break; } if(c == '.') { if(pp > res.bytes + 2 || val > 0xff) return used; *pp++ = val; c = *++cp; } else break; } used = cp - buffer; if(c != '\0' && (!isascii(c) || !isspace(c))) return used; if(c == '\n') used++; if(!digit) return used; if(val > max[pp - res.bytes]) return used; addr = res.word | htonl(val); if(!addr && check_set == IPT_RECENT_SET) return used; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": ip_recent_ctrl c: %c, addr: %u used: %d\n",c,addr,used); #endif /* Set up and just call match */ info = kmalloc(sizeof(struct ipt_recent_info),GFP_KERNEL); if(!info) { return -ENOMEM; } info->seconds = 0; info->hit_count = 0; info->check_set = check_set; info->invert = 0; info->side = IPT_RECENT_SOURCE; strncpy(info->name,curr_table->name,IPT_RECENT_NAME_LEN); info->name[IPT_RECENT_NAME_LEN-1] = '\0'; skb = kmalloc(sizeof(struct sk_buff),GFP_KERNEL); if (!skb) { used = -ENOMEM; goto out_free_info; } skb->nh.iph = kmalloc(sizeof(struct iphdr),GFP_KERNEL); if (!skb->nh.iph) { used = -ENOMEM; goto out_free_skb; } skb->nh.iph->saddr = addr; skb->nh.iph->daddr = 0; /* Clear ttl 
since we have no way of knowing it */ skb->nh.iph->ttl = 0; match(skb,NULL,NULL,info,0,NULL,sizeof(struct ipt_recent_info),NULL); kfree(skb->nh.iph); out_free_skb: kfree(skb); out_free_info: kfree(info); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": Leaving ip_recent_ctrl addr: %u used: %d\n",addr,used); #endif return used; } #endif /* CONFIG_PROC_FS */ /* 'match' is our primary function, called by the kernel whenever a rule is * hit with our module as an option to it. * What this function does depends on what was specifically asked of it by * the user: * --set -- Add or update last seen time of the source address of the packet * -- matchinfo->check_set == IPT_RECENT_SET * --rcheck -- Just check if the source address is in the list * -- matchinfo->check_set == IPT_RECENT_CHECK * --update -- If the source address is in the list, update last_seen * -- matchinfo->check_set == IPT_RECENT_UPDATE * --remove -- If the source address is in the list, remove it * -- matchinfo->check_set == IPT_RECENT_REMOVE * --seconds -- Option to --rcheck/--update, only match if last_seen within seconds * -- matchinfo->seconds * --hitcount -- Option to --rcheck/--update, only match if seen hitcount times * -- matchinfo->hit_count * --seconds and --hitcount can be combined */ static int match(const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const void *matchinfo, int offset, const void *hdr, u_int16_t datalen, int *hotdrop) { int pkt_count, hits_found, ans; unsigned long now; const struct ipt_recent_info *info = matchinfo; u_int32_t addr = 0, time_temp; u_int8_t ttl = skb->nh.iph->ttl; int *hash_table; int orig_hash_result, hash_result, temp, location = 0, time_loc, end_collision_chain = -1; struct time_info_list *time_info; struct recent_ip_tables *curr_table; struct recent_ip_tables *last_table; struct recent_ip_list *r_list; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match() called\n"); #endif /* Default is false ^ info->invert */ 
ans = info->invert; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): name = '%s'\n",info->name); #endif /* if out != NULL then routing has been done and TTL changed. * We change it back here internally for match what came in before routing. */ if(out) ttl++; /* Find the right table */ spin_lock_bh(&recent_lock); curr_table = r_tables; while( (last_table = curr_table) && strncmp(info->name,curr_table->name,IPT_RECENT_NAME_LEN) && (curr_table = curr_table->next) ); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): table found('%s')\n",info->name); #endif spin_unlock_bh(&recent_lock); /* Table with this name not found, match impossible */ if(!curr_table) { return ans; } /* Make sure no one is changing the list while we work with it */ spin_lock_bh(&curr_table->list_lock); r_list = curr_table->table; if(info->side == IPT_RECENT_DEST) addr = skb->nh.iph->daddr; else addr = skb->nh.iph->saddr; if(!addr) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match() address (%u) invalid, leaving.\n",addr); #endif spin_unlock_bh(&curr_table->list_lock); return ans; } #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): checking table, addr: %u, ttl: %u, orig_ttl: %u\n",addr,ttl,skb->nh.iph->ttl); #endif /* Get jiffies now in case they changed while we were waiting for a lock */ now = jiffies; hash_table = curr_table->hash_table; time_info = curr_table->time_info; orig_hash_result = hash_result = hash_func(addr,ip_list_hash_size); /* Hash entry at this result used */ /* Check for TTL match if requested. If TTL is zero then a match would never * happen, so match regardless of existing TTL in that case. Zero means the * entry was added via the /proc interface anyway, so we will just use the * first TTL we get for that IP address. 
*/ if(info->check_set & IPT_RECENT_TTL) { while(hash_table[hash_result] != -1 && !(r_list[hash_table[hash_result]].addr == addr && (!r_list[hash_table[hash_result]].ttl || r_list[hash_table[hash_result]].ttl == ttl))) { /* Collision in hash table */ hash_result = (hash_result + 1) % ip_list_hash_size; } } else { while(hash_table[hash_result] != -1 && r_list[hash_table[hash_result]].addr != addr) { /* Collision in hash table */ hash_result = (hash_result + 1) % ip_list_hash_size; } } if(hash_table[hash_result] == -1 && !(info->check_set & IPT_RECENT_SET)) { /* IP not in list and not asked to SET */ spin_unlock_bh(&curr_table->list_lock); return ans; } /* Check if we need to handle the collision, do not need to on REMOVE */ if(orig_hash_result != hash_result && !(info->check_set & IPT_RECENT_REMOVE)) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): Collision in hash table. (or: %d,hr: %d,oa: %u,ha: %u)\n", orig_hash_result, hash_result, r_list[hash_table[orig_hash_result]].addr, addr); #endif /* We had a collision. * orig_hash_result is where we started, hash_result is where we ended up. 
* So, swap them because we are likely to see the same guy again sooner */ #ifdef DEBUG if(debug) { printk(KERN_INFO RECENT_NAME ": match(): Collision; hash_table[orig_hash_result] = %d\n",hash_table[orig_hash_result]); printk(KERN_INFO RECENT_NAME ": match(): Collision; r_list[hash_table[orig_hash_result]].hash_entry = %d\n", r_list[hash_table[orig_hash_result]].hash_entry); } #endif r_list[hash_table[orig_hash_result]].hash_entry = hash_result; temp = hash_table[orig_hash_result]; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): Collision; hash_table[hash_result] = %d\n",hash_table[hash_result]); #endif hash_table[orig_hash_result] = hash_table[hash_result]; hash_table[hash_result] = temp; temp = hash_result; hash_result = orig_hash_result; orig_hash_result = temp; time_info[r_list[hash_table[orig_hash_result]].time_pos].position = hash_table[orig_hash_result]; if(hash_table[hash_result] != -1) { r_list[hash_table[hash_result]].hash_entry = hash_result; time_info[r_list[hash_table[hash_result]].time_pos].position = hash_table[hash_result]; } #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): Collision handled.\n"); #endif } if(hash_table[hash_result] == -1) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): New table entry. 
(hr: %d,ha: %u)\n", hash_result, addr); #endif /* New item found and IPT_RECENT_SET, so we need to add it */ location = time_info[curr_table->time_pos].position; hash_table[r_list[location].hash_entry] = -1; hash_table[hash_result] = location; memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); r_list[location].time_pos = curr_table->time_pos; r_list[location].addr = addr; r_list[location].ttl = ttl; r_list[location].last_seen = now; r_list[location].oldest_pkt = 1; r_list[location].last_pkts[0] = now; r_list[location].hash_entry = hash_result; time_info[curr_table->time_pos].time = r_list[location].last_seen; curr_table->time_pos = (curr_table->time_pos + 1) % ip_list_tot; ans = !info->invert; } else { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): Existing table entry. (hr: %d,ha: %u)\n", hash_result, addr); #endif /* Existing item found */ location = hash_table[hash_result]; /* We have a match on address, now to make sure it meets all requirements for a * full match. 
*/ if(info->check_set & IPT_RECENT_CHECK || info->check_set & IPT_RECENT_UPDATE) { if(!info->seconds && !info->hit_count) ans = !info->invert; else ans = info->invert; if(info->seconds && !info->hit_count) { if(time_before_eq(now,r_list[location].last_seen+info->seconds*HZ)) ans = !info->invert; else ans = info->invert; } if(info->seconds && info->hit_count) { for(pkt_count = 0, hits_found = 0; pkt_count < ip_pkt_list_tot; pkt_count++) { if(time_before_eq(now,r_list[location].last_pkts[pkt_count]+info->seconds*HZ)) hits_found++; } if(hits_found >= info->hit_count) ans = !info->invert; else ans = info->invert; } if(info->hit_count && !info->seconds) { for(pkt_count = 0, hits_found = 0; pkt_count < ip_pkt_list_tot; pkt_count++) { if(r_list[location].last_pkts[pkt_count] == 0) break; hits_found++; } if(hits_found >= info->hit_count) ans = !info->invert; else ans = info->invert; } } #ifdef DEBUG if(debug) { if(ans) printk(KERN_INFO RECENT_NAME ": match(): match addr: %u\n",addr); else printk(KERN_INFO RECENT_NAME ": match(): no match addr: %u\n",addr); } #endif /* If and only if we have been asked to SET, or to UPDATE (on match) do we add the * current timestamp to the last_seen. 
*/ if((info->check_set & IPT_RECENT_SET && (ans = !info->invert)) || (info->check_set & IPT_RECENT_UPDATE && ans)) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): SET or UPDATE; updating time info.\n"); #endif /* Have to update our time info */ time_loc = r_list[location].time_pos; time_info[time_loc].time = now; time_info[time_loc].position = location; while((time_info[(time_loc+1) % ip_list_tot].time < time_info[time_loc].time) && ((time_loc+1) % ip_list_tot) != curr_table->time_pos) { time_temp = time_info[time_loc].time; time_info[time_loc].time = time_info[(time_loc+1)%ip_list_tot].time; time_info[(time_loc+1)%ip_list_tot].time = time_temp; time_temp = time_info[time_loc].position; time_info[time_loc].position = time_info[(time_loc+1)%ip_list_tot].position; time_info[(time_loc+1)%ip_list_tot].position = time_temp; r_list[time_info[time_loc].position].time_pos = time_loc; r_list[time_info[(time_loc+1)%ip_list_tot].position].time_pos = (time_loc+1)%ip_list_tot; time_loc = (time_loc+1) % ip_list_tot; } r_list[location].time_pos = time_loc; r_list[location].ttl = ttl; r_list[location].last_pkts[r_list[location].oldest_pkt] = now; r_list[location].oldest_pkt = ++r_list[location].oldest_pkt % ip_pkt_list_tot; r_list[location].last_seen = now; } /* If we have been asked to remove the entry from the list, just set it to 0 */ if(info->check_set & IPT_RECENT_REMOVE) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): REMOVE; clearing entry (or: %d, hr: %d).\n",orig_hash_result,hash_result); #endif /* Check if this is part of a collision chain */ while(hash_table[(orig_hash_result+1) % ip_list_hash_size] != -1) { orig_hash_result++; if(hash_func(r_list[hash_table[orig_hash_result]].addr,ip_list_hash_size) == hash_result) { /* Found collision chain, how deep does this rabbit hole go? 
*/ #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): REMOVE; found collision chain.\n"); #endif end_collision_chain = orig_hash_result; } } if(end_collision_chain != -1) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match(): REMOVE; part of collision chain, moving to end.\n"); #endif /* Part of a collision chain, swap it with the end of the chain * before removing. */ r_list[hash_table[end_collision_chain]].hash_entry = hash_result; temp = hash_table[end_collision_chain]; hash_table[end_collision_chain] = hash_table[hash_result]; hash_table[hash_result] = temp; time_info[r_list[hash_table[hash_result]].time_pos].position = hash_table[hash_result]; hash_result = end_collision_chain; r_list[hash_table[hash_result]].hash_entry = hash_result; time_info[r_list[hash_table[hash_result]].time_pos].position = hash_table[hash_result]; } location = hash_table[hash_result]; hash_table[r_list[location].hash_entry] = -1; time_loc = r_list[location].time_pos; time_info[time_loc].time = 0; time_info[time_loc].position = location; while((time_info[(time_loc+1) % ip_list_tot].time < time_info[time_loc].time) && ((time_loc+1) % ip_list_tot) != curr_table->time_pos) { time_temp = time_info[time_loc].time; time_info[time_loc].time = time_info[(time_loc+1)%ip_list_tot].time; time_info[(time_loc+1)%ip_list_tot].time = time_temp; time_temp = time_info[time_loc].position; time_info[time_loc].position = time_info[(time_loc+1)%ip_list_tot].position; time_info[(time_loc+1)%ip_list_tot].position = time_temp; r_list[time_info[time_loc].position].time_pos = time_loc; r_list[time_info[(time_loc+1)%ip_list_tot].position].time_pos = (time_loc+1)%ip_list_tot; time_loc = (time_loc+1) % ip_list_tot; } r_list[location].time_pos = time_loc; r_list[location].last_seen = 0; r_list[location].addr = 0; r_list[location].ttl = 0; memset(r_list[location].last_pkts,0,ip_pkt_list_tot*sizeof(u_int32_t)); r_list[location].oldest_pkt = 0; ans = !info->invert; } 
spin_unlock_bh(&curr_table->list_lock); return ans; } spin_unlock_bh(&curr_table->list_lock); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": match() left.\n"); #endif return ans; } /* This function is to verify that the rule given during the userspace iptables * command is correct. * If the command is valid then we check if the table name referred to by the * rule exists, if not it is created. */ static int checkentry(const char *tablename, const struct ipt_ip *ip, void *matchinfo, unsigned int matchsize, unsigned int hook_mask) { int flag = 0, c; u_int32_t *hold; const struct ipt_recent_info *info = matchinfo; struct recent_ip_tables *curr_table, *find_table, *last_table; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry() entered.\n"); #endif if (matchsize != IPT_ALIGN(sizeof(struct ipt_recent_info))) return 0; /* seconds and hit_count only valid for CHECK/UPDATE */ if(info->check_set & IPT_RECENT_SET) { flag++; if(info->seconds || info->hit_count) return 0; } if(info->check_set & IPT_RECENT_REMOVE) { flag++; if(info->seconds || info->hit_count) return 0; } if(info->check_set & IPT_RECENT_CHECK) flag++; if(info->check_set & IPT_RECENT_UPDATE) flag++; /* One and only one of these should ever be set */ if(flag != 1) return 0; /* Name must be set to something */ if(!info->name || !info->name[0]) return 0; /* Things look good, create a list for this if it does not exist */ /* Lock the linked list while we play with it */ spin_lock_bh(&recent_lock); /* Look for an entry with this name already created */ /* Finds the end of the list and the entry before the end if current name does not exist */ find_table = r_tables; while( (last_table = find_table) && strncmp(info->name,find_table->name,IPT_RECENT_NAME_LEN) && (find_table = find_table->next) ); /* If a table already exists just increment the count on that table and return */ if(find_table) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: table found (%s), incrementing 
count.\n",info->name); #endif find_table->count++; spin_unlock_bh(&recent_lock); return 1; } spin_unlock_bh(&recent_lock); /* Table with this name not found */ /* Allocate memory for new linked list item */ #ifdef DEBUG if(debug) { printk(KERN_INFO RECENT_NAME ": checkentry: no table found (%s)\n",info->name); printk(KERN_INFO RECENT_NAME ": checkentry: Allocationg %d for link-list entry.\n",sizeof(struct recent_ip_tables)); } #endif curr_table = vmalloc(sizeof(struct recent_ip_tables)); if(curr_table == NULL) return 0; curr_table->list_lock = SPIN_LOCK_UNLOCKED; curr_table->next = NULL; curr_table->count = 1; curr_table->time_pos = 0; strncpy(curr_table->name,info->name,IPT_RECENT_NAME_LEN); curr_table->name[IPT_RECENT_NAME_LEN-1] = '\0'; /* Allocate memory for this table and the list of packets in each entry. */ #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: Allocating %d for table (%s).\n", sizeof(struct recent_ip_list)*ip_list_tot, info->name); #endif curr_table->table = vmalloc(sizeof(struct recent_ip_list)*ip_list_tot); if(curr_table->table == NULL) { vfree(curr_table); return 0; } memset(curr_table->table,0,sizeof(struct recent_ip_list)*ip_list_tot); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: Allocating %d for pkt_list.\n", sizeof(u_int32_t)*ip_pkt_list_tot*ip_list_tot); #endif hold = vmalloc(sizeof(u_int32_t)*ip_pkt_list_tot*ip_list_tot); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: After pkt_list allocation.\n"); #endif if(hold == NULL) { printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for pkt_list.\n"); vfree(curr_table->table); vfree(curr_table); return 0; } for(c = 0; c < ip_list_tot; c++) { curr_table->table[c].last_pkts = hold + c*ip_pkt_list_tot; } /* Allocate memory for the hash table */ #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: Allocating %d for hash_table.\n", sizeof(int)*ip_list_hash_size); #endif curr_table->hash_table = 
vmalloc(sizeof(int)*ip_list_hash_size); if(!curr_table->hash_table) { printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for hash_table.\n"); vfree(hold); vfree(curr_table->table); vfree(curr_table); return 0; } for(c = 0; c < ip_list_hash_size; c++) { curr_table->hash_table[c] = -1; } /* Allocate memory for the time info */ #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: Allocating %d for time_info.\n", sizeof(struct time_info_list)*ip_list_tot); #endif curr_table->time_info = vmalloc(sizeof(struct time_info_list)*ip_list_tot); if(!curr_table->time_info) { printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for time_info.\n"); vfree(curr_table->hash_table); vfree(hold); vfree(curr_table->table); vfree(curr_table); return 0; } for(c = 0; c < ip_list_tot; c++) { curr_table->time_info[c].position = c; curr_table->time_info[c].time = 0; } /* Put the new table in place */ spin_lock_bh(&recent_lock); find_table = r_tables; while( (last_table = find_table) && strncmp(info->name,find_table->name,IPT_RECENT_NAME_LEN) && (find_table = find_table->next) ); /* If a table already exists just increment the count on that table and return */ if(find_table) { find_table->count++; spin_unlock_bh(&recent_lock); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry: table found (%s), created by other process.\n",info->name); #endif vfree(curr_table->time_info); vfree(curr_table->hash_table); vfree(hold); vfree(curr_table->table); vfree(curr_table); return 1; } if(!last_table) r_tables = curr_table; else last_table->next = curr_table; spin_unlock_bh(&recent_lock); #ifdef CONFIG_PROC_FS /* Create our proc 'status' entry. 
*/ curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent); if (!curr_table->status_proc) { vfree(hold); printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n"); /* Destroy the created table */ spin_lock_bh(&recent_lock); last_table = NULL; curr_table = r_tables; if(!curr_table) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry() create_proc failed, no tables.\n"); #endif spin_unlock_bh(&recent_lock); return 0; } while( strncmp(info->name,curr_table->name,IPT_RECENT_NAME_LEN) && (last_table = curr_table) && (curr_table = curr_table->next) ); if(!curr_table) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry() create_proc failed, table already destroyed.\n"); #endif spin_unlock_bh(&recent_lock); return 0; } if(last_table) last_table->next = curr_table->next; else r_tables = curr_table->next; spin_unlock_bh(&recent_lock); vfree(curr_table->time_info); vfree(curr_table->hash_table); vfree(curr_table->table); vfree(curr_table); return 0; } curr_table->status_proc->owner = THIS_MODULE; curr_table->status_proc->data = curr_table; wmb(); curr_table->status_proc->read_proc = ip_recent_get_info; curr_table->status_proc->write_proc = ip_recent_ctrl; #endif /* CONFIG_PROC_FS */ #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": checkentry() left.\n"); #endif return 1; } /* This function is called in the event that a rule matching this module is * removed. * When this happens we need to check if there are no other rules matching * the table given. If that is the case then we remove the table and clean * up its memory. 
*/ static void destroy(void *matchinfo, unsigned int matchsize) { const struct ipt_recent_info *info = matchinfo; struct recent_ip_tables *curr_table, *last_table; #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() entered.\n"); #endif if(matchsize != IPT_ALIGN(sizeof(struct ipt_recent_info))) return; /* Lock the linked list while we play with it */ spin_lock_bh(&recent_lock); /* Look for an entry with this name already created */ /* Finds the end of the list and the entry before the end if current name does not exist */ last_table = NULL; curr_table = r_tables; if(!curr_table) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() No tables found, leaving.\n"); #endif spin_unlock_bh(&recent_lock); return; } while( strncmp(info->name,curr_table->name,IPT_RECENT_NAME_LEN) && (last_table = curr_table) && (curr_table = curr_table->next) ); /* If a table does not exist then do nothing and return */ if(!curr_table) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() table not found, leaving.\n"); #endif spin_unlock_bh(&recent_lock); return; } curr_table->count--; /* If count is still non-zero then there are still rules referenceing it so we do nothing */ if(curr_table->count) { #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() table found, non-zero count, leaving.\n"); #endif spin_unlock_bh(&recent_lock); return; } #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() table found, zero count, removing.\n"); #endif /* Count must be zero so we remove this table from the list */ if(last_table) last_table->next = curr_table->next; else r_tables = curr_table->next; spin_unlock_bh(&recent_lock); /* lock to make sure any late-runners still using this after we removed it from * the list finish up then remove everything */ spin_lock_bh(&curr_table->list_lock); spin_unlock_bh(&curr_table->list_lock); #ifdef CONFIG_PROC_FS if(curr_table->status_proc) remove_proc_entry(curr_table->name,proc_net_ipt_recent); #endif 
/* CONFIG_PROC_FS */ vfree(curr_table->table[0].last_pkts); vfree(curr_table->table); vfree(curr_table->hash_table); vfree(curr_table->time_info); vfree(curr_table); #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": destroy() left.\n"); #endif return; } /* This is the structure we pass to ipt_register to register our * module with iptables. */ static struct ipt_match recent_match = { .name = "recent", .match = &match, .checkentry = &checkentry, .destroy = &destroy, .me = THIS_MODULE }; /* Kernel module initialization. */ static int __init init(void) { int err, count; printk(version); #ifdef CONFIG_PROC_FS proc_net_ipt_recent = proc_mkdir("ipt_recent",proc_net); if(!proc_net_ipt_recent) return -ENOMEM; #endif if(ip_list_hash_size && ip_list_hash_size <= ip_list_tot) { printk(KERN_WARNING RECENT_NAME ": ip_list_hash_size too small, resetting to default.\n"); ip_list_hash_size = 0; } if(!ip_list_hash_size) { ip_list_hash_size = ip_list_tot*3; count = 2*2; while(ip_list_hash_size > count) count = count*2; ip_list_hash_size = count; } #ifdef DEBUG if(debug) printk(KERN_INFO RECENT_NAME ": ip_list_hash_size: %d\n",ip_list_hash_size); #endif err = ipt_register_match(&recent_match); if (err) remove_proc_entry("ipt_recent", proc_net); return err; } /* Kernel module destruction. */ static void __exit fini(void) { ipt_unregister_match(&recent_match); remove_proc_entry("ipt_recent",proc_net); } /* Register our module with the kernel. */ module_init(init); module_exit(fini);
gpl-2.0
hustcalm/vlc-player
modules/gui/skins2/src/os_factory.cpp
5
2207
/***************************************************************************** * os_factory.cpp ***************************************************************************** * Copyright (C) 2003 the VideoLAN team * $Id: 297485c2b0fa9514f5ce58cc9675c630a5f00b42 $ * * Authors: Cyril Deguet <asmax@via.ecp.fr> * Olivier Teulière <ipkiss@via.ecp.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ #include "os_factory.hpp" #ifdef X11_SKINS #include "../x11/x11_factory.hpp" #elif defined WIN32_SKINS #include "../win32/win32_factory.hpp" #elif defined MACOSX_SKINS #include "../macosx/macosx_factory.hpp" #endif OSFactory *OSFactory::instance( intf_thread_t *pIntf ) { if( ! pIntf->p_sys->p_osFactory ) { OSFactory *pOsFactory; #ifdef X11_SKINS pOsFactory = new X11Factory( pIntf ); #elif defined WIN32_SKINS pOsFactory = new Win32Factory( pIntf ); #elif defined MACOSX_SKINS pOsFactory = new MacOSXFactory( pIntf ); #else #error "No OSFactory implementation !" 
#endif if( pOsFactory->init() ) { // Initialization succeeded pIntf->p_sys->p_osFactory = pOsFactory; } else { // Initialization failed delete pOsFactory; } } return pIntf->p_sys->p_osFactory; } void OSFactory::destroy( intf_thread_t *pIntf ) { delete pIntf->p_sys->p_osFactory; pIntf->p_sys->p_osFactory = NULL; }
gpl-2.0
tbalden/htc-kernel-endeavoru-stable
drivers/spi/spi_nuc900.c
261
10431
/* linux/drivers/spi/spi_nuc900.c * * Copyright (c) 2009 Nuvoton technology. * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <mach/nuc900_spi.h> /* usi registers offset */ #define USI_CNT 0x00 #define USI_DIV 0x04 #define USI_SSR 0x08 #define USI_RX0 0x10 #define USI_TX0 0x10 /* usi register bit */ #define ENINT (0x01 << 17) #define ENFLG (0x01 << 16) #define TXNUM (0x03 << 8) #define TXNEG (0x01 << 2) #define RXNEG (0x01 << 1) #define LSB (0x01 << 10) #define SELECTLEV (0x01 << 2) #define SELECTPOL (0x01 << 31) #define SELECTSLAVE 0x01 #define GOBUSY 0x01 struct nuc900_spi { struct spi_bitbang bitbang; struct completion done; void __iomem *regs; int irq; int len; int count; const unsigned char *tx; unsigned char *rx; struct clk *clk; struct resource *ioarea; struct spi_master *master; struct spi_device *curdev; struct device *dev; struct nuc900_spi_info *pdata; spinlock_t lock; struct resource *res; }; static inline struct nuc900_spi *to_hw(struct spi_device *sdev) { return spi_master_get_devdata(sdev->master); } static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr) { struct nuc900_spi *hw = to_hw(spi); unsigned int val; unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0; unsigned int cpol = spi->mode & SPI_CPOL ? 
1 : 0; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_SSR); if (!cs) val &= ~SELECTLEV; else val |= SELECTLEV; if (!ssr) val &= ~SELECTSLAVE; else val |= SELECTSLAVE; __raw_writel(val, hw->regs + USI_SSR); val = __raw_readl(hw->regs + USI_CNT); if (!cpol) val &= ~SELECTPOL; else val |= SELECTPOL; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_spi_chipsel(struct spi_device *spi, int value) { switch (value) { case BITBANG_CS_INACTIVE: nuc900_slave_select(spi, 0); break; case BITBANG_CS_ACTIVE: nuc900_slave_select(spi, 1); break; } } static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); if (!txnum) val &= ~TXNUM; else val |= txnum << 0x08; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw, unsigned int txbitlen) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); val |= (txbitlen << 0x03); __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_spi_gobusy(struct nuc900_spi *hw) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); val |= GOBUSY; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static int nuc900_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) { return 0; } static int nuc900_spi_setup(struct spi_device *spi) { return 0; } static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count) { return hw->tx ? 
hw->tx[count] : 0; } static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t) { struct nuc900_spi *hw = to_hw(spi); hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->len = t->len; hw->count = 0; __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0); nuc900_spi_gobusy(hw); wait_for_completion(&hw->done); return hw->count; } static irqreturn_t nuc900_spi_irq(int irq, void *dev) { struct nuc900_spi *hw = dev; unsigned int status; unsigned int count = hw->count; status = __raw_readl(hw->regs + USI_CNT); __raw_writel(status, hw->regs + USI_CNT); if (status & ENFLG) { hw->count++; if (hw->rx) hw->rx[count] = __raw_readl(hw->regs + USI_RX0); count++; if (count < hw->len) { __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0); nuc900_spi_gobusy(hw); } else { complete(&hw->done); } return IRQ_HANDLED; } complete(&hw->done); return IRQ_HANDLED; } static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); if (edge) val |= TXNEG; else val &= ~TXNEG; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); if (edge) val |= RXNEG; else val &= ~RXNEG; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); if (lsb) val |= LSB; else val &= ~LSB; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); if (sleep) 
val |= (sleep << 12); else val &= ~(0x0f << 12); __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_enable_int(struct nuc900_spi *hw) { unsigned int val; unsigned long flags; spin_lock_irqsave(&hw->lock, flags); val = __raw_readl(hw->regs + USI_CNT); val |= ENINT; __raw_writel(val, hw->regs + USI_CNT); spin_unlock_irqrestore(&hw->lock, flags); } static void nuc900_set_divider(struct nuc900_spi *hw) { __raw_writel(hw->pdata->divider, hw->regs + USI_DIV); } static void nuc900_init_spi(struct nuc900_spi *hw) { clk_enable(hw->clk); spin_lock_init(&hw->lock); nuc900_tx_edge(hw, hw->pdata->txneg); nuc900_rx_edge(hw, hw->pdata->rxneg); nuc900_send_first(hw, hw->pdata->lsb); nuc900_set_sleep(hw, hw->pdata->sleep); nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen); nuc900_spi_setup_txnum(hw, hw->pdata->txnum); nuc900_set_divider(hw); nuc900_enable_int(hw); } static int __devinit nuc900_spi_probe(struct platform_device *pdev) { struct nuc900_spi *hw; struct spi_master *master; int err = 0; master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi)); if (master == NULL) { dev_err(&pdev->dev, "No memory for spi_master\n"); err = -ENOMEM; goto err_nomem; } hw = spi_master_get_devdata(master); memset(hw, 0, sizeof(struct nuc900_spi)); hw->master = spi_master_get(master); hw->pdata = pdev->dev.platform_data; hw->dev = &pdev->dev; if (hw->pdata == NULL) { dev_err(&pdev->dev, "No platform data supplied\n"); err = -ENOENT; goto err_pdata; } platform_set_drvdata(pdev, hw); init_completion(&hw->done); master->mode_bits = SPI_MODE_0; master->num_chipselect = hw->pdata->num_cs; master->bus_num = hw->pdata->bus_num; hw->bitbang.master = hw->master; hw->bitbang.setup_transfer = nuc900_spi_setupxfer; hw->bitbang.chipselect = nuc900_spi_chipsel; hw->bitbang.txrx_bufs = nuc900_spi_txrx; hw->bitbang.master->setup = nuc900_spi_setup; hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (hw->res == NULL) { dev_err(&pdev->dev, 
"Cannot get IORESOURCE_MEM\n"); err = -ENOENT; goto err_pdata; } hw->ioarea = request_mem_region(hw->res->start, resource_size(hw->res), pdev->name); if (hw->ioarea == NULL) { dev_err(&pdev->dev, "Cannot reserve region\n"); err = -ENXIO; goto err_pdata; } hw->regs = ioremap(hw->res->start, resource_size(hw->res)); if (hw->regs == NULL) { dev_err(&pdev->dev, "Cannot map IO\n"); err = -ENXIO; goto err_iomap; } hw->irq = platform_get_irq(pdev, 0); if (hw->irq < 0) { dev_err(&pdev->dev, "No IRQ specified\n"); err = -ENOENT; goto err_irq; } err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw); if (err) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); goto err_irq; } hw->clk = clk_get(&pdev->dev, "spi"); if (IS_ERR(hw->clk)) { dev_err(&pdev->dev, "No clock for device\n"); err = PTR_ERR(hw->clk); goto err_clk; } mfp_set_groupg(&pdev->dev); nuc900_init_spi(hw); err = spi_bitbang_start(&hw->bitbang); if (err) { dev_err(&pdev->dev, "Failed to register SPI master\n"); goto err_register; } return 0; err_register: clk_disable(hw->clk); clk_put(hw->clk); err_clk: free_irq(hw->irq, hw); err_irq: iounmap(hw->regs); err_iomap: release_mem_region(hw->res->start, resource_size(hw->res)); kfree(hw->ioarea); err_pdata: spi_master_put(hw->master); err_nomem: return err; } static int __devexit nuc900_spi_remove(struct platform_device *dev) { struct nuc900_spi *hw = platform_get_drvdata(dev); free_irq(hw->irq, hw); platform_set_drvdata(dev, NULL); spi_unregister_master(hw->master); clk_disable(hw->clk); clk_put(hw->clk); iounmap(hw->regs); release_mem_region(hw->res->start, resource_size(hw->res)); kfree(hw->ioarea); spi_master_put(hw->master); return 0; } static struct platform_driver nuc900_spi_driver = { .probe = nuc900_spi_probe, .remove = __devexit_p(nuc900_spi_remove), .driver = { .name = "nuc900-spi", .owner = THIS_MODULE, }, }; static int __init nuc900_spi_init(void) { return platform_driver_register(&nuc900_spi_driver); } static void __exit nuc900_spi_exit(void) { 
platform_driver_unregister(&nuc900_spi_driver); } module_init(nuc900_spi_init); module_exit(nuc900_spi_exit); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("nuc900 spi driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:nuc900-spi");
gpl-2.0
dadziokPL/android_kernel_samsung_grandprimevelte
drivers/clk/clk-wm831x.c
517
9919
/* * WM831x clock control * * Copyright 2011-2 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/mfd/wm831x/core.h> struct wm831x_clk { struct wm831x *wm831x; struct clk_hw xtal_hw; struct clk_hw fll_hw; struct clk_hw clkout_hw; struct clk *xtal; struct clk *fll; struct clk *clkout; bool xtal_ena; }; static int wm831x_xtal_is_prepared(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, xtal_hw); return clkdata->xtal_ena; } static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, xtal_hw); if (clkdata->xtal_ena) return 32768; else return 0; } static const struct clk_ops wm831x_xtal_ops = { .is_prepared = wm831x_xtal_is_prepared, .recalc_rate = wm831x_xtal_recalc_rate, }; static struct clk_init_data wm831x_xtal_init = { .name = "xtal", .ops = &wm831x_xtal_ops, .flags = CLK_IS_ROOT, }; static const unsigned long wm831x_fll_auto_rates[] = { 2048000, 11289600, 12000000, 12288000, 19200000, 22579600, 24000000, 24576000, }; static int wm831x_fll_is_prepared(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_1); if (ret < 0) { dev_err(wm831x->dev, "Unable to read FLL_CONTROL_1: %d\n", ret); return true; } return (ret & WM831X_FLL_ENA) != 0; } static int wm831x_fll_prepare(struct clk_hw *hw) { struct wm831x_clk *clkdata = 
container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_1, WM831X_FLL_ENA, WM831X_FLL_ENA); if (ret != 0) dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret); usleep_range(2000, 2000); return ret; } static void wm831x_fll_unprepare(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_1, WM831X_FLL_ENA, 0); if (ret != 0) dev_crit(wm831x->dev, "Failed to disable FLL: %d\n", ret); } static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2); if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n", ret); return 0; } if (ret & WM831X_FLL_AUTO) return wm831x_fll_auto_rates[ret & WM831X_FLL_AUTO_FREQ_MASK]; dev_err(wm831x->dev, "FLL only supported in AUTO mode\n"); return 0; } static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *unused) { int best = 0; int i; for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++) if (abs(wm831x_fll_auto_rates[i] - rate) < abs(wm831x_fll_auto_rates[best] - rate)) best = i; return wm831x_fll_auto_rates[best]; } static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int i; for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++) if (wm831x_fll_auto_rates[i] == rate) break; if (i == ARRAY_SIZE(wm831x_fll_auto_rates)) return -EINVAL; if (wm831x_fll_is_prepared(hw)) return -EPERM; return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2, WM831X_FLL_AUTO_FREQ_MASK, i); } static const char 
*wm831x_fll_parents[] = { "xtal", "clkin", }; static u8 wm831x_fll_get_parent(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, fll_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; /* AUTO mode is always clocked from the crystal */ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2); if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n", ret); return 0; } if (ret & WM831X_FLL_AUTO) return 0; ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_5); if (ret < 0) { dev_err(wm831x->dev, "Unable to read FLL_CONTROL_5: %d\n", ret); return 0; } switch (ret & WM831X_FLL_CLK_SRC_MASK) { case 0: return 0; case 1: return 1; default: dev_err(wm831x->dev, "Unsupported FLL clock source %d\n", ret & WM831X_FLL_CLK_SRC_MASK); return 0; } } static const struct clk_ops wm831x_fll_ops = { .is_prepared = wm831x_fll_is_prepared, .prepare = wm831x_fll_prepare, .unprepare = wm831x_fll_unprepare, .round_rate = wm831x_fll_round_rate, .recalc_rate = wm831x_fll_recalc_rate, .set_rate = wm831x_fll_set_rate, .get_parent = wm831x_fll_get_parent, }; static struct clk_init_data wm831x_fll_init = { .name = "fll", .ops = &wm831x_fll_ops, .parent_names = wm831x_fll_parents, .num_parents = ARRAY_SIZE(wm831x_fll_parents), .flags = CLK_SET_RATE_GATE, }; static int wm831x_clkout_is_prepared(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, clkout_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1); if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", ret); return true; } return (ret & WM831X_CLKOUT_ENA) != 0; } static int wm831x_clkout_prepare(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, clkout_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_unlock(wm831x); if (ret != 0) { dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret); return ret; } ret = 
wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1, WM831X_CLKOUT_ENA, WM831X_CLKOUT_ENA); if (ret != 0) dev_crit(wm831x->dev, "Failed to enable CLKOUT: %d\n", ret); wm831x_reg_lock(wm831x); return ret; } static void wm831x_clkout_unprepare(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, clkout_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_unlock(wm831x); if (ret != 0) { dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret); return; } ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1, WM831X_CLKOUT_ENA, 0); if (ret != 0) dev_crit(wm831x->dev, "Failed to disable CLKOUT: %d\n", ret); wm831x_reg_lock(wm831x); } static const char *wm831x_clkout_parents[] = { "fll", "xtal", }; static u8 wm831x_clkout_get_parent(struct clk_hw *hw) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, clkout_hw); struct wm831x *wm831x = clkdata->wm831x; int ret; ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1); if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", ret); return 0; } if (ret & WM831X_CLKOUT_SRC) return 1; else return 0; } static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent) { struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk, clkout_hw); struct wm831x *wm831x = clkdata->wm831x; return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1, WM831X_CLKOUT_SRC, parent << WM831X_CLKOUT_SRC_SHIFT); } static const struct clk_ops wm831x_clkout_ops = { .is_prepared = wm831x_clkout_is_prepared, .prepare = wm831x_clkout_prepare, .unprepare = wm831x_clkout_unprepare, .get_parent = wm831x_clkout_get_parent, .set_parent = wm831x_clkout_set_parent, }; static struct clk_init_data wm831x_clkout_init = { .name = "clkout", .ops = &wm831x_clkout_ops, .parent_names = wm831x_clkout_parents, .num_parents = ARRAY_SIZE(wm831x_clkout_parents), .flags = CLK_SET_RATE_PARENT, }; static int wm831x_clk_probe(struct platform_device *pdev) { struct wm831x *wm831x = 
dev_get_drvdata(pdev->dev.parent); struct wm831x_clk *clkdata; int ret; clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL); if (!clkdata) return -ENOMEM; clkdata->wm831x = wm831x; /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2); if (ret < 0) { dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n", ret); return ret; } clkdata->xtal_ena = ret & WM831X_XTAL_ENA; clkdata->xtal_hw.init = &wm831x_xtal_init; clkdata->xtal = devm_clk_register(&pdev->dev, &clkdata->xtal_hw); if (IS_ERR(clkdata->xtal)) return PTR_ERR(clkdata->xtal); clkdata->fll_hw.init = &wm831x_fll_init; clkdata->fll = devm_clk_register(&pdev->dev, &clkdata->fll_hw); if (IS_ERR(clkdata->fll)) return PTR_ERR(clkdata->fll); clkdata->clkout_hw.init = &wm831x_clkout_init; clkdata->clkout = devm_clk_register(&pdev->dev, &clkdata->clkout_hw); if (IS_ERR(clkdata->clkout)) return PTR_ERR(clkdata->clkout); platform_set_drvdata(pdev, clkdata); return 0; } static struct platform_driver wm831x_clk_driver = { .probe = wm831x_clk_probe, .driver = { .name = "wm831x-clk", .owner = THIS_MODULE, }, }; module_platform_driver(wm831x_clk_driver); /* Module information */ MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM831x clock driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-clk");
gpl-2.0
abyssxsy/linux-tk1
drivers/ata/ahci_platform.c
1029
8273
/* * AHCI SATA platform driver * * Copyright 2004-2005 Red Hat, Inc. * Jeff Garzik <jgarzik@pobox.com> * Copyright 2010 MontaVista Software, LLC. * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. */ #include <linux/clk.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/libata.h> #include <linux/ahci_platform.h> #include "ahci.h" static void ahci_host_stop(struct ata_host *host); enum ahci_type { AHCI, /* standard platform ahci */ IMX53_AHCI, /* ahci on i.mx53 */ STRICT_AHCI, /* delayed DMA engine start */ }; static struct platform_device_id ahci_devtype[] = { { .name = "ahci", .driver_data = AHCI, }, { .name = "imx53-ahci", .driver_data = IMX53_AHCI, }, { .name = "strict-ahci", .driver_data = STRICT_AHCI, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, ahci_devtype); static struct ata_port_operations ahci_platform_ops = { .inherits = &ahci_ops, .host_stop = ahci_host_stop, }; static struct ata_port_operations ahci_platform_retry_srst_ops = { .inherits = &ahci_pmp_retry_srst_ops, .host_stop = ahci_host_stop, }; static const struct ata_port_info ahci_port_info[] = { /* by features */ [AHCI] = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_platform_ops, }, [IMX53_AHCI] = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_platform_retry_srst_ops, }, [STRICT_AHCI] = { AHCI_HFLAGS (AHCI_HFLAG_DELAY_ENGINE), .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_platform_ops, }, }; static struct scsi_host_template ahci_platform_sht = { 
AHCI_SHT("ahci_platform"), }; static int ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_platform_data *pdata = dev_get_platdata(dev); const struct platform_device_id *id = platform_get_device_id(pdev); struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0]; const struct ata_port_info *ppi[] = { &pi, NULL }; struct ahci_host_priv *hpriv; struct ata_host *host; struct resource *mem; int irq; int n_ports; int i; int rc; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(dev, "no mmio space\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "no irq\n"); return -EINVAL; } if (pdata && pdata->ata_port_info) pi = *pdata->ata_port_info; hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) { dev_err(dev, "can't alloc ahci_host_priv\n"); return -ENOMEM; } hpriv->flags |= (unsigned long)pi.private_data; hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); if (!hpriv->mmio) { dev_err(dev, "can't map %pR\n", mem); return -ENOMEM; } hpriv->clk = clk_get(dev, NULL); if (IS_ERR(hpriv->clk)) { dev_err(dev, "can't get clock\n"); } else { rc = clk_prepare_enable(hpriv->clk); if (rc) { dev_err(dev, "clock prepare enable failed"); goto free_clk; } } /* * Some platforms might need to prepare for mmio region access, * which could be done in the following init call. So, the mmio * region shouldn't be accessed before init (if provided) has * returned successfully. */ if (pdata && pdata->init) { rc = pdata->init(dev, hpriv->mmio); if (rc) goto disable_unprepare_clk; } ahci_save_initial_config(dev, hpriv, pdata ? pdata->force_port_map : 0, pdata ? 
pdata->mask_port_map : 0); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; ahci_set_em_messages(hpriv, &pi); /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. */ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(dev, ppi, n_ports); if (!host) { rc = -ENOMEM; goto pdata_exit; } host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; else printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_desc(ap, "mmio %pR", mem); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } rc = ahci_reset_controller(host); if (rc) goto pdata_exit; ahci_init_controller(host); ahci_print_info(host, "platform"); rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, &ahci_platform_sht); if (rc) goto pdata_exit; return 0; pdata_exit: if (pdata && pdata->exit) pdata->exit(dev); disable_unprepare_clk: if (!IS_ERR(hpriv->clk)) clk_disable_unprepare(hpriv->clk); free_clk: if (!IS_ERR(hpriv->clk)) clk_put(hpriv->clk); return rc; } static void ahci_host_stop(struct ata_host *host) { struct device *dev = host->dev; struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ahci_host_priv *hpriv = host->private_data; if (pdata && pdata->exit) pdata->exit(dev); if (!IS_ERR(hpriv->clk)) { clk_disable_unprepare(hpriv->clk); clk_put(hpriv->clk); } } #ifdef CONFIG_PM_SLEEP static 
int ahci_suspend(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; int rc; if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(dev, "firmware update required for suspend/resume\n"); return -EIO; } /* * AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. */ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ rc = ata_host_suspend(host, PMSG_SUSPEND); if (rc) return rc; if (pdata && pdata->suspend) return pdata->suspend(dev); if (!IS_ERR(hpriv->clk)) clk_disable_unprepare(hpriv->clk); return 0; } static int ahci_resume(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; int rc; if (!IS_ERR(hpriv->clk)) { rc = clk_prepare_enable(hpriv->clk); if (rc) { dev_err(dev, "clock prepare enable failed"); return rc; } } if (pdata && pdata->resume) { rc = pdata->resume(dev); if (rc) goto disable_unprepare_clk; } if (dev->power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_reset_controller(host); if (rc) goto disable_unprepare_clk; ahci_init_controller(host); } ata_host_resume(host); return 0; disable_unprepare_clk: if (!IS_ERR(hpriv->clk)) clk_disable_unprepare(hpriv->clk); return rc; } #endif static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume); static const struct of_device_id ahci_of_match[] = { { .compatible = "snps,spear-ahci", }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); static struct platform_driver ahci_driver = { .probe = ahci_probe, .remove = ata_platform_remove_one, .driver = { .name = "ahci", .owner = THIS_MODULE, .of_match_table = ahci_of_match, .pm = &ahci_pm_ops, }, .id_table = ahci_devtype, }; 
module_platform_driver(ahci_driver); MODULE_DESCRIPTION("AHCI SATA platform driver"); MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ahci");
gpl-2.0
ak-67/kernel_mediatek_wiko
scripts/mod/file2alias.c
1029
34583
/* Simple code to turn various tables in an ELF file into alias definitions. * This deals with kernel datastructures where they should be * dealt with: in the kernel source. * * Copyright 2002-2003 Rusty Russell, IBM Corporation * 2003 Kai Germaschewski * * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include "modpost.h" /* We use the ELF typedefs for kernel_ulong_t but bite the bullet and * use either stdint.h or inttypes.h for the rest. */ #if KERNEL_ELFCLASS == ELFCLASS32 typedef Elf32_Addr kernel_ulong_t; #define BITS_PER_LONG 32 #else typedef Elf64_Addr kernel_ulong_t; #define BITS_PER_LONG 64 #endif #ifdef __sun__ #include <inttypes.h> #else #include <stdint.h> #endif #include <ctype.h> #include <stdbool.h> typedef uint32_t __u32; typedef uint16_t __u16; typedef unsigned char __u8; /* Big exception to the "don't include kernel headers into userspace, which * even potentially has different endianness and word sizes, since * we handle those differences explicitly below */ #include "../../include/linux/mod_devicetable.h" /* This array collects all instances that use the generic do_table */ struct devtable { const char *device_id; /* name of table, __mod_<name>_device_table. */ unsigned long id_size; void *function; }; #define ___cat(a,b) a ## b #define __cat(a,b) ___cat(a,b) /* we need some special handling for this host tool running eventually on * Darwin. The Mach-O section handling is a bit different than ELF section * handling. 
The differnces in detail are: * a) we have segments which have sections * b) we need a API call to get the respective section symbols */ #if defined(__MACH__) #include <mach-o/getsect.h> #define INIT_SECTION(name) do { \ unsigned long name ## _len; \ char *__cat(pstart_,name) = getsectdata("__TEXT", \ #name, &__cat(name,_len)); \ char *__cat(pstop_,name) = __cat(pstart_,name) + \ __cat(name, _len); \ __cat(__start_,name) = (void *)__cat(pstart_,name); \ __cat(__stop_,name) = (void *)__cat(pstop_,name); \ } while (0) #define SECTION(name) __attribute__((section("__TEXT, " #name))) struct devtable **__start___devtable, **__stop___devtable; #else #define INIT_SECTION(name) /* no-op for ELF */ #define SECTION(name) __attribute__((section(#name))) /* We construct a table of pointers in an ELF section (pointers generally * go unpadded by gcc). ld creates boundary syms for us. */ extern struct devtable *__start___devtable[], *__stop___devtable[]; #endif /* __MACH__ */ #if __GNUC__ == 3 && __GNUC_MINOR__ < 3 # define __used __attribute__((__unused__)) #else # define __used __attribute__((__used__)) #endif /* Add a table entry. We test function type matches while we're here. */ #define ADD_TO_DEVTABLE(device_id, type, function) \ static struct devtable __cat(devtable,__LINE__) = { \ device_id + 0*sizeof((function)((const char *)NULL, \ (type *)NULL, \ (char *)NULL)), \ sizeof(type), (function) }; \ static struct devtable *SECTION(__devtable) __used \ __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__) #define ADD(str, sep, cond, field) \ do { \ strcat(str, sep); \ if (cond) \ sprintf(str + strlen(str), \ sizeof(field) == 1 ? "%02X" : \ sizeof(field) == 2 ? "%04X" : \ sizeof(field) == 4 ? 
"%08X" : "", \ field); \ else \ sprintf(str + strlen(str), "*"); \ } while(0) /* Always end in a wildcard, for future extension */ static inline void add_wildcard(char *str) { int len = strlen(str); if (str[len - 1] != '*') strcat(str + len, "*"); } unsigned int cross_build = 0; /** * Check that sizeof(device_id type) are consistent with size of section * in .o file. If in-consistent then userspace and kernel does not agree * on actual size which is a bug. * Also verify that the final entry in the table is all zeros. * Ignore both checks if build host differ from target host and size differs. **/ static void device_id_check(const char *modname, const char *device_id, unsigned long size, unsigned long id_size, void *symval) { int i; if (size % id_size || size < id_size) { if (cross_build != 0) return; fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo " "of the size of section __mod_%s_device_table=%lu.\n" "Fix definition of struct %s_device_id " "in mod_devicetable.h\n", modname, device_id, id_size, device_id, size, device_id); } /* Verify last one is a terminator */ for (i = 0; i < id_size; i++ ) { if (*(uint8_t*)(symval+size-id_size+i)) { fprintf(stderr,"%s: struct %s_device_id is %lu bytes. 
" "The last of %lu is:\n", modname, device_id, id_size, size / id_size); for (i = 0; i < id_size; i++ ) fprintf(stderr,"0x%02x ", *(uint8_t*)(symval+size-id_size+i) ); fprintf(stderr,"\n"); fatal("%s: struct %s_device_id is not terminated " "with a NULL entry!\n", modname, device_id); } } } /* USB is special because the bcdDevice can be matched against a numeric range */ /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ static void do_usb_entry(struct usb_device_id *id, unsigned int bcdDevice_initial, int bcdDevice_initial_digits, unsigned char range_lo, unsigned char range_hi, unsigned char max, struct module *mod) { char alias[500]; strcpy(alias, "usb:"); ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR, id->idVendor); ADD(alias, "p", id->match_flags&USB_DEVICE_ID_MATCH_PRODUCT, id->idProduct); strcat(alias, "d"); if (bcdDevice_initial_digits) sprintf(alias + strlen(alias), "%0*X", bcdDevice_initial_digits, bcdDevice_initial); if (range_lo == range_hi) sprintf(alias + strlen(alias), "%X", range_lo); else if (range_lo > 0 || range_hi < max) { if (range_lo > 0x9 || range_hi < 0xA) sprintf(alias + strlen(alias), "[%X-%X]", range_lo, range_hi); else { sprintf(alias + strlen(alias), range_lo < 0x9 ? "[%X-9" : "[%X", range_lo); sprintf(alias + strlen(alias), range_hi > 0xA ? 
"a-%X]" : "%X]", range_lo); } } if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1)) strcat(alias, "*"); ADD(alias, "dc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS, id->bDeviceClass); ADD(alias, "dsc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS, id->bDeviceSubClass); ADD(alias, "dp", id->match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL, id->bDeviceProtocol); ADD(alias, "ic", id->match_flags&USB_DEVICE_ID_MATCH_INT_CLASS, id->bInterfaceClass); ADD(alias, "isc", id->match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS, id->bInterfaceSubClass); ADD(alias, "ip", id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL, id->bInterfaceProtocol); add_wildcard(alias); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } /* Handles increment/decrement of BCD formatted integers */ /* Returns the previous value, so it works like i++ or i-- */ static unsigned int incbcd(unsigned int *bcd, int inc, unsigned char max, size_t chars) { unsigned int init = *bcd, i, j; unsigned long long c, dec = 0; /* If bcd is not in BCD format, just increment */ if (max > 0x9) { *bcd += inc; return init; } /* Convert BCD to Decimal */ for (i=0 ; i < chars ; i++) { c = (*bcd >> (i << 2)) & 0xf; c = c > 9 ? 9 : c; /* force to bcd just in case */ for (j=0 ; j < i ; j++) c = c * 10; dec += c; } /* Do our increment/decrement */ dec += inc; *bcd = 0; /* Convert back to BCD */ for (i=0 ; i < chars ; i++) { for (c=1,j=0 ; j < i ; j++) c = c * 10; c = (dec / c) % 10; *bcd += c << (i << 2); } return init; } static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) { unsigned int devlo, devhi; unsigned char chi, clo, max; int ndigits; id->match_flags = TO_NATIVE(id->match_flags); id->idVendor = TO_NATIVE(id->idVendor); id->idProduct = TO_NATIVE(id->idProduct); devlo = id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO ? TO_NATIVE(id->bcdDevice_lo) : 0x0U; devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ? 
TO_NATIVE(id->bcdDevice_hi) : ~0x0U; /* Figure out if this entry is in bcd or hex format */ max = 0x9; /* Default to decimal format */ for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) { clo = (devlo >> (ndigits << 2)) & 0xf; chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf; if (clo > max || chi > max) { max = 0xf; break; } } /* * Some modules (visor) have empty slots as placeholder for * run-time specification that results in catch-all alias */ if (!(id->idVendor | id->idProduct | id->bDeviceClass | id->bInterfaceClass)) return; /* Convert numeric bcdDevice range into fnmatch-able pattern(s) */ for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) { clo = devlo & 0xf; chi = devhi & 0xf; if (chi > max) /* If we are in bcd mode, truncate if necessary */ chi = max; devlo >>= 4; devhi >>= 4; if (devlo == devhi || !ndigits) { do_usb_entry(id, devlo, ndigits, clo, chi, max, mod); break; } if (clo > 0x0) do_usb_entry(id, incbcd(&devlo, 1, max, sizeof(id->bcdDevice_lo) * 2), ndigits, clo, max, max, mod); if (chi < max) do_usb_entry(id, incbcd(&devhi, -1, max, sizeof(id->bcdDevice_lo) * 2), ndigits, 0x0, chi, max, mod); } } static void do_usb_table(void *symval, unsigned long size, struct module *mod) { unsigned int i; const unsigned long id_size = sizeof(struct usb_device_id); device_id_check(mod->name, "usb", size, id_size, symval); /* Leave last one: it's the terminator. 
*/ size -= id_size; for (i = 0; i < size; i += id_size) do_usb_entry_multi(symval + i, mod); } /* Looks like: hid:bNvNpN */ static int do_hid_entry(const char *filename, struct hid_device_id *id, char *alias) { id->bus = TO_NATIVE(id->bus); id->vendor = TO_NATIVE(id->vendor); id->product = TO_NATIVE(id->product); sprintf(alias, "hid:b%04X", id->bus); ADD(alias, "v", id->vendor != HID_ANY_ID, id->vendor); ADD(alias, "p", id->product != HID_ANY_ID, id->product); return 1; } ADD_TO_DEVTABLE("hid", struct hid_device_id, do_hid_entry); /* Looks like: ieee1394:venNmoNspNverN */ static int do_ieee1394_entry(const char *filename, struct ieee1394_device_id *id, char *alias) { id->match_flags = TO_NATIVE(id->match_flags); id->vendor_id = TO_NATIVE(id->vendor_id); id->model_id = TO_NATIVE(id->model_id); id->specifier_id = TO_NATIVE(id->specifier_id); id->version = TO_NATIVE(id->version); strcpy(alias, "ieee1394:"); ADD(alias, "ven", id->match_flags & IEEE1394_MATCH_VENDOR_ID, id->vendor_id); ADD(alias, "mo", id->match_flags & IEEE1394_MATCH_MODEL_ID, id->model_id); ADD(alias, "sp", id->match_flags & IEEE1394_MATCH_SPECIFIER_ID, id->specifier_id); ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION, id->version); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ieee1394", struct ieee1394_device_id, do_ieee1394_entry); /* Looks like: pci:vNdNsvNsdNbcNscNiN. */ static int do_pci_entry(const char *filename, struct pci_device_id *id, char *alias) { /* Class field can be divided into these three. 
*/ unsigned char baseclass, subclass, interface, baseclass_mask, subclass_mask, interface_mask; id->vendor = TO_NATIVE(id->vendor); id->device = TO_NATIVE(id->device); id->subvendor = TO_NATIVE(id->subvendor); id->subdevice = TO_NATIVE(id->subdevice); id->class = TO_NATIVE(id->class); id->class_mask = TO_NATIVE(id->class_mask); strcpy(alias, "pci:"); ADD(alias, "v", id->vendor != PCI_ANY_ID, id->vendor); ADD(alias, "d", id->device != PCI_ANY_ID, id->device); ADD(alias, "sv", id->subvendor != PCI_ANY_ID, id->subvendor); ADD(alias, "sd", id->subdevice != PCI_ANY_ID, id->subdevice); baseclass = (id->class) >> 16; baseclass_mask = (id->class_mask) >> 16; subclass = (id->class) >> 8; subclass_mask = (id->class_mask) >> 8; interface = id->class; interface_mask = id->class_mask; if ((baseclass_mask != 0 && baseclass_mask != 0xFF) || (subclass_mask != 0 && subclass_mask != 0xFF) || (interface_mask != 0 && interface_mask != 0xFF)) { warn("Can't handle masks in %s:%04X\n", filename, id->class_mask); return 0; } ADD(alias, "bc", baseclass_mask == 0xFF, baseclass); ADD(alias, "sc", subclass_mask == 0xFF, subclass); ADD(alias, "i", interface_mask == 0xFF, interface); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("pci", struct pci_device_id, do_pci_entry); /* looks like: "ccw:tNmNdtNdmN" */ static int do_ccw_entry(const char *filename, struct ccw_device_id *id, char *alias) { id->match_flags = TO_NATIVE(id->match_flags); id->cu_type = TO_NATIVE(id->cu_type); id->cu_model = TO_NATIVE(id->cu_model); id->dev_type = TO_NATIVE(id->dev_type); id->dev_model = TO_NATIVE(id->dev_model); strcpy(alias, "ccw:"); ADD(alias, "t", id->match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE, id->cu_type); ADD(alias, "m", id->match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL, id->cu_model); ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE, id->dev_type); ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL, id->dev_model); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ccw", struct 
ccw_device_id, do_ccw_entry); /* looks like: "ap:tN" */ static int do_ap_entry(const char *filename, struct ap_device_id *id, char *alias) { sprintf(alias, "ap:t%02X*", id->dev_type); return 1; } ADD_TO_DEVTABLE("ap", struct ap_device_id, do_ap_entry); /* looks like: "css:tN" */ static int do_css_entry(const char *filename, struct css_device_id *id, char *alias) { sprintf(alias, "css:t%01X", id->type); return 1; } ADD_TO_DEVTABLE("css", struct css_device_id, do_css_entry); /* Looks like: "serio:tyNprNidNexN" */ static int do_serio_entry(const char *filename, struct serio_device_id *id, char *alias) { id->type = TO_NATIVE(id->type); id->proto = TO_NATIVE(id->proto); id->id = TO_NATIVE(id->id); id->extra = TO_NATIVE(id->extra); strcpy(alias, "serio:"); ADD(alias, "ty", id->type != SERIO_ANY, id->type); ADD(alias, "pr", id->proto != SERIO_ANY, id->proto); ADD(alias, "id", id->id != SERIO_ANY, id->id); ADD(alias, "ex", id->extra != SERIO_ANY, id->extra); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("serio", struct serio_device_id, do_serio_entry); /* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ static int do_acpi_entry(const char *filename, struct acpi_device_id *id, char *alias) { sprintf(alias, "acpi*:%s:*", id->id); return 1; } ADD_TO_DEVTABLE("acpi", struct acpi_device_id, do_acpi_entry); /* looks like: "pnp:dD" */ static void do_pnp_device_entry(void *symval, unsigned long size, struct module *mod) { const unsigned long id_size = sizeof(struct pnp_device_id); const unsigned int count = (size / id_size)-1; const struct pnp_device_id *devs = symval; unsigned int i; device_id_check(mod->name, "pnp", size, id_size, symval); for (i = 0; i < count; i++) { const char *id = (char *)devs[i].id; char acpi_id[sizeof(devs[0].id)]; int j; buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"pnp:d%s*\");\n", id); /* fix broken pnp bus lowercasing */ for (j = 0; j < sizeof(acpi_id); j++) acpi_id[j] = toupper(id[j]); buf_printf(&mod->dev_table_buf, 
"MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id); } } /* looks like: "pnp:dD" for every device of the card */ static void do_pnp_card_entries(void *symval, unsigned long size, struct module *mod) { const unsigned long id_size = sizeof(struct pnp_card_device_id); const unsigned int count = (size / id_size)-1; const struct pnp_card_device_id *cards = symval; unsigned int i; device_id_check(mod->name, "pnp", size, id_size, symval); for (i = 0; i < count; i++) { unsigned int j; const struct pnp_card_device_id *card = &cards[i]; for (j = 0; j < PNP_MAX_DEVICES; j++) { const char *id = (char *)card->devs[j].id; int i2, j2; int dup = 0; if (!id[0]) break; /* find duplicate, already added value */ for (i2 = 0; i2 < i && !dup; i2++) { const struct pnp_card_device_id *card2 = &cards[i2]; for (j2 = 0; j2 < PNP_MAX_DEVICES; j2++) { const char *id2 = (char *)card2->devs[j2].id; if (!id2[0]) break; if (!strcmp(id, id2)) { dup = 1; break; } } } /* add an individual alias for every device entry */ if (!dup) { char acpi_id[sizeof(card->devs[0].id)]; int k; buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"pnp:d%s*\");\n", id); /* fix broken pnp bus lowercasing */ for (k = 0; k < sizeof(acpi_id); k++) acpi_id[k] = toupper(id[k]); buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"acpi*:%s:*\");\n", acpi_id); } } } } /* Looks like: pcmcia:mNcNfNfnNpfnNvaNvbNvcNvdN. 
*/ static int do_pcmcia_entry(const char *filename, struct pcmcia_device_id *id, char *alias) { unsigned int i; id->match_flags = TO_NATIVE(id->match_flags); id->manf_id = TO_NATIVE(id->manf_id); id->card_id = TO_NATIVE(id->card_id); id->func_id = TO_NATIVE(id->func_id); id->function = TO_NATIVE(id->function); id->device_no = TO_NATIVE(id->device_no); for (i=0; i<4; i++) { id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]); } strcpy(alias, "pcmcia:"); ADD(alias, "m", id->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID, id->manf_id); ADD(alias, "c", id->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID, id->card_id); ADD(alias, "f", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID, id->func_id); ADD(alias, "fn", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION, id->function); ADD(alias, "pfn", id->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO, id->device_no); ADD(alias, "pa", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, id->prod_id_hash[0]); ADD(alias, "pb", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, id->prod_id_hash[1]); ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]); ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("pcmcia", struct pcmcia_device_id, do_pcmcia_entry); static int do_of_entry (const char *filename, struct of_device_id *of, char *alias) { int len; char *tmp; len = sprintf (alias, "of:N%sT%s", of->name[0] ? of->name : "*", of->type[0] ? of->type : "*"); if (of->compatible[0]) sprintf (&alias[len], "%sC%s", of->type[0] ? "*" : "", of->compatible); /* Replace all whitespace with underscores */ for (tmp = alias; tmp && *tmp; tmp++) if (isspace (*tmp)) *tmp = '_'; add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("of", struct of_device_id, do_of_entry); static int do_vio_entry(const char *filename, struct vio_device_id *vio, char *alias) { char *tmp; sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*", vio->compat[0] ? 
vio->compat : "*"); /* Replace all whitespace with underscores */ for (tmp = alias; tmp && *tmp; tmp++) if (isspace (*tmp)) *tmp = '_'; add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("vio", struct vio_device_id, do_vio_entry); #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) static void do_input(char *alias, kernel_ulong_t *arr, unsigned int min, unsigned int max) { unsigned int i; for (i = min; i < max; i++) if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG))) sprintf(alias + strlen(alias), "%X,*", i); } /* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */ static int do_input_entry(const char *filename, struct input_device_id *id, char *alias) { sprintf(alias, "input:"); ADD(alias, "b", id->flags & INPUT_DEVICE_ID_MATCH_BUS, id->bustype); ADD(alias, "v", id->flags & INPUT_DEVICE_ID_MATCH_VENDOR, id->vendor); ADD(alias, "p", id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT, id->product); ADD(alias, "e", id->flags & INPUT_DEVICE_ID_MATCH_VERSION, id->version); sprintf(alias + strlen(alias), "-e*"); if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT) do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX); sprintf(alias + strlen(alias), "k*"); if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT) do_input(alias, id->keybit, INPUT_DEVICE_ID_KEY_MIN_INTERESTING, INPUT_DEVICE_ID_KEY_MAX); sprintf(alias + strlen(alias), "r*"); if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT) do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX); sprintf(alias + strlen(alias), "a*"); if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT) do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX); sprintf(alias + strlen(alias), "m*"); if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT) do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX); sprintf(alias + strlen(alias), "l*"); if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT) do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX); sprintf(alias + strlen(alias), "s*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT) do_input(alias, id->sndbit, 0, 
INPUT_DEVICE_ID_SND_MAX); sprintf(alias + strlen(alias), "f*"); if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT) do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX); sprintf(alias + strlen(alias), "w*"); if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT) do_input(alias, id->swbit, 0, INPUT_DEVICE_ID_SW_MAX); return 1; } ADD_TO_DEVTABLE("input", struct input_device_id, do_input_entry); static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa, char *alias) { if (eisa->sig[0]) sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig); else strcat(alias, "*"); return 1; } ADD_TO_DEVTABLE("eisa", struct eisa_device_id, do_eisa_entry); /* Looks like: parisc:tNhvNrevNsvN */ static int do_parisc_entry(const char *filename, struct parisc_device_id *id, char *alias) { id->hw_type = TO_NATIVE(id->hw_type); id->hversion = TO_NATIVE(id->hversion); id->hversion_rev = TO_NATIVE(id->hversion_rev); id->sversion = TO_NATIVE(id->sversion); strcpy(alias, "parisc:"); ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type); ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion); ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev); ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("parisc", struct parisc_device_id, do_parisc_entry); /* Looks like: sdio:cNvNdN. */ static int do_sdio_entry(const char *filename, struct sdio_device_id *id, char *alias) { id->class = TO_NATIVE(id->class); id->vendor = TO_NATIVE(id->vendor); id->device = TO_NATIVE(id->device); strcpy(alias, "sdio:"); ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class); ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor); ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("sdio", struct sdio_device_id, do_sdio_entry); /* Looks like: ssb:vNidNrevN. 
*/ static int do_ssb_entry(const char *filename, struct ssb_device_id *id, char *alias) { id->vendor = TO_NATIVE(id->vendor); id->coreid = TO_NATIVE(id->coreid); id->revision = TO_NATIVE(id->revision); strcpy(alias, "ssb:"); ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor); ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid); ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("ssb", struct ssb_device_id, do_ssb_entry); /* Looks like: bcma:mNidNrevNclN. */ static int do_bcma_entry(const char *filename, struct bcma_device_id *id, char *alias) { id->manuf = TO_NATIVE(id->manuf); id->id = TO_NATIVE(id->id); id->rev = TO_NATIVE(id->rev); id->class = TO_NATIVE(id->class); strcpy(alias, "bcma:"); ADD(alias, "m", id->manuf != BCMA_ANY_MANUF, id->manuf); ADD(alias, "id", id->id != BCMA_ANY_ID, id->id); ADD(alias, "rev", id->rev != BCMA_ANY_REV, id->rev); ADD(alias, "cl", id->class != BCMA_ANY_CLASS, id->class); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("bcma", struct bcma_device_id, do_bcma_entry); /* Looks like: virtio:dNvN */ static int do_virtio_entry(const char *filename, struct virtio_device_id *id, char *alias) { id->device = TO_NATIVE(id->device); id->vendor = TO_NATIVE(id->vendor); strcpy(alias, "virtio:"); ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device); ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor); add_wildcard(alias); return 1; } ADD_TO_DEVTABLE("virtio", struct virtio_device_id, do_virtio_entry); /* * Looks like: vmbus:guid * Each byte of the guid will be represented by two hex characters * in the name. 
*/ static int do_vmbus_entry(const char *filename, struct hv_vmbus_device_id *id, char *alias) { int i; char guid_name[((sizeof(id->guid) + 1)) * 2]; for (i = 0; i < (sizeof(id->guid) * 2); i += 2) sprintf(&guid_name[i], "%02x", id->guid[i/2]); strcpy(alias, "vmbus:"); strcat(alias, guid_name); return 1; } ADD_TO_DEVTABLE("vmbus", struct hv_vmbus_device_id, do_vmbus_entry); /* Looks like: i2c:S */ static int do_i2c_entry(const char *filename, struct i2c_device_id *id, char *alias) { sprintf(alias, I2C_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("i2c", struct i2c_device_id, do_i2c_entry); /* Looks like: spi:S */ static int do_spi_entry(const char *filename, struct spi_device_id *id, char *alias) { sprintf(alias, SPI_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry); static const struct dmifield { const char *prefix; int field; } dmi_fields[] = { { "bvn", DMI_BIOS_VENDOR }, { "bvr", DMI_BIOS_VERSION }, { "bd", DMI_BIOS_DATE }, { "svn", DMI_SYS_VENDOR }, { "pn", DMI_PRODUCT_NAME }, { "pvr", DMI_PRODUCT_VERSION }, { "rvn", DMI_BOARD_VENDOR }, { "rn", DMI_BOARD_NAME }, { "rvr", DMI_BOARD_VERSION }, { "cvn", DMI_CHASSIS_VENDOR }, { "ct", DMI_CHASSIS_TYPE }, { "cvr", DMI_CHASSIS_VERSION }, { NULL, DMI_NONE } }; static void dmi_ascii_filter(char *d, const char *s) { /* Filter out characters we don't want to see in the modalias string */ for (; *s; s++) if (*s > ' ' && *s < 127 && *s != ':') *(d++) = *s; *d = 0; } static int do_dmi_entry(const char *filename, struct dmi_system_id *id, char *alias) { int i, j; sprintf(alias, "dmi*"); for (i = 0; i < ARRAY_SIZE(dmi_fields); i++) { for (j = 0; j < 4; j++) { if (id->matches[j].slot && id->matches[j].slot == dmi_fields[i].field) { sprintf(alias + strlen(alias), ":%s*", dmi_fields[i].prefix); dmi_ascii_filter(alias + strlen(alias), id->matches[j].substr); strcat(alias, "*"); } } } strcat(alias, ":"); return 1; } ADD_TO_DEVTABLE("dmi", struct dmi_system_id, 
do_dmi_entry); static int do_platform_entry(const char *filename, struct platform_device_id *id, char *alias) { sprintf(alias, PLATFORM_MODULE_PREFIX "%s", id->name); return 1; } ADD_TO_DEVTABLE("platform", struct platform_device_id, do_platform_entry); static int do_mdio_entry(const char *filename, struct mdio_device_id *id, char *alias) { int i; alias += sprintf(alias, MDIO_MODULE_PREFIX); for (i = 0; i < 32; i++) { if (!((id->phy_id_mask >> (31-i)) & 1)) *(alias++) = '?'; else if ((id->phy_id >> (31-i)) & 1) *(alias++) = '1'; else *(alias++) = '0'; } /* Terminate the string */ *alias = 0; return 1; } ADD_TO_DEVTABLE("mdio", struct mdio_device_id, do_mdio_entry); /* Looks like: zorro:iN. */ static int do_zorro_entry(const char *filename, struct zorro_device_id *id, char *alias) { id->id = TO_NATIVE(id->id); strcpy(alias, "zorro:"); ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id); return 1; } ADD_TO_DEVTABLE("zorro", struct zorro_device_id, do_zorro_entry); /* looks like: "pnp:dD" */ static int do_isapnp_entry(const char *filename, struct isapnp_device_id *id, char *alias) { sprintf(alias, "pnp:d%c%c%c%x%x%x%x*", 'A' + ((id->vendor >> 2) & 0x3f) - 1, 'A' + (((id->vendor & 3) << 3) | ((id->vendor >> 13) & 7)) - 1, 'A' + ((id->vendor >> 8) & 0x1f) - 1, (id->function >> 4) & 0x0f, id->function & 0x0f, (id->function >> 12) & 0x0f, (id->function >> 8) & 0x0f); return 1; } ADD_TO_DEVTABLE("isapnp", struct isapnp_device_id, do_isapnp_entry); /* * Append a match expression for a single masked hex digit. * outp points to a pointer to the character at which to append. * *outp is updated on return to point just after the appended text, * to facilitate further appending. */ static void append_nibble_mask(char **outp, unsigned int nibble, unsigned int mask) { char *p = *outp; unsigned int i; switch (mask) { case 0: *p++ = '?'; break; case 0xf: p += sprintf(p, "%X", nibble); break; default: /* * Dumbly emit a match pattern for all possible matching * digits. 
This could be improved in some cases using ranges, * but it has the advantage of being trivially correct, and is * often optimal. */ *p++ = '['; for (i = 0; i < 0x10; i++) if ((i & mask) == nibble) p += sprintf(p, "%X", i); *p++ = ']'; } /* Ensure that the string remains NUL-terminated: */ *p = '\0'; /* Advance the caller's end-of-string pointer: */ *outp = p; } /* * looks like: "amba:dN" * * N is exactly 8 digits, where each is an upper-case hex digit, or * a ? or [] pattern matching exactly one digit. */ static int do_amba_entry(const char *filename, struct amba_id *id, char *alias) { unsigned int digit; char *p = alias; if ((id->id & id->mask) != id->id) fatal("%s: Masked-off bit(s) of AMBA device ID are non-zero: " "id=0x%08X, mask=0x%08X. Please fix this driver.\n", filename, id->id, id->mask); p += sprintf(alias, "amba:d"); for (digit = 0; digit < 8; digit++) append_nibble_mask(&p, (id->id >> (4 * (7 - digit))) & 0xf, (id->mask >> (4 * (7 - digit))) & 0xf); return 1; } ADD_TO_DEVTABLE("amba", struct amba_id, do_amba_entry); /* LOOKS like x86cpu:vendor:VVVV:family:FFFF:model:MMMM:feature:*,FEAT,* * All fields are numbers. It would be nicer to use strings for vendor * and feature, but getting those out of the build system here is too * complicated. */ static int do_x86cpu_entry(const char *filename, struct x86_cpu_id *id, char *alias) { id->feature = TO_NATIVE(id->feature); id->family = TO_NATIVE(id->family); id->model = TO_NATIVE(id->model); id->vendor = TO_NATIVE(id->vendor); strcpy(alias, "x86cpu:"); ADD(alias, "vendor:", id->vendor != X86_VENDOR_ANY, id->vendor); ADD(alias, ":family:", id->family != X86_FAMILY_ANY, id->family); ADD(alias, ":model:", id->model != X86_MODEL_ANY, id->model); strcat(alias, ":feature:*"); if (id->feature != X86_FEATURE_ANY) sprintf(alias + strlen(alias), "%04X*", id->feature); return 1; } ADD_TO_DEVTABLE("x86cpu", struct x86_cpu_id, do_x86cpu_entry); /* Does namelen bytes of name exactly match the symbol? 
*/ static bool sym_is(const char *name, unsigned namelen, const char *symbol) { if (namelen != strlen(symbol)) return false; return memcmp(name, symbol, namelen) == 0; } static void do_table(void *symval, unsigned long size, unsigned long id_size, const char *device_id, void *function, struct module *mod) { unsigned int i; char alias[500]; int (*do_entry)(const char *, void *entry, char *alias) = function; device_id_check(mod->name, device_id, size, id_size, symval); /* Leave last one: it's the terminator. */ size -= id_size; for (i = 0; i < size; i += id_size) { if (do_entry(mod->name, symval+i, alias)) { buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); } } } /* Create MODULE_ALIAS() statements. * At this time, we cannot write the actual output C source yet, * so we write into the mod->dev_table_buf buffer. */ void handle_moddevtable(struct module *mod, struct elf_info *info, Elf_Sym *sym, const char *symname) { void *symval; char *zeros = NULL; const char *name; unsigned int namelen; /* We're looking for a section relative symbol */ if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections) return; /* We're looking for an object */ if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT) return; /* All our symbols are of form <prefix>__mod_XXX_device_table. 
*/ name = strstr(symname, "__mod_"); if (!name) return; name += strlen("__mod_"); namelen = strlen(name); if (namelen < strlen("_device_table")) return; if (strcmp(name + namelen - strlen("_device_table"), "_device_table")) return; namelen -= strlen("_device_table"); /* Handle all-NULL symbols allocated into .bss */ if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) { zeros = calloc(1, sym->st_size); symval = zeros; } else { symval = (void *)info->hdr + info->sechdrs[get_secindex(info, sym)].sh_offset + sym->st_value; } /* First handle the "special" cases */ if (sym_is(name, namelen, "usb")) do_usb_table(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp")) do_pnp_device_entry(symval, sym->st_size, mod); else if (sym_is(name, namelen, "pnp_card")) do_pnp_card_entries(symval, sym->st_size, mod); else { struct devtable **p; INIT_SECTION(__devtable); for (p = __start___devtable; p < __stop___devtable; p++) { if (sym_is(name, namelen, (*p)->device_id)) { do_table(symval, sym->st_size, (*p)->id_size, (*p)->device_id, (*p)->function, mod); break; } } } free(zeros); } /* Now add out buffered information to the generated C source */ void add_moddevtable(struct buffer *buf, struct module *mod) { buf_printf(buf, "\n"); buf_write(buf, mod->dev_table_buf.p, mod->dev_table_buf.pos); free(mod->dev_table_buf.p); }
gpl-2.0
chilisom/linux-stable
drivers/watchdog/acquirewdt.c
1541
8692
/* * Acquire Single Board Computer Watchdog Timer driver * * Based on wdt.c. Original copyright messages: * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk> * * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com> * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT * Can't add timeout - driver doesn't allow changing value */ /* * Theory of Operation: * The Watch-Dog Timer is provided to ensure that standalone * Systems can always recover from catastrophic conditions that * caused the CPU to crash. This condition may have occurred by * external EMI or a software bug. When the CPU stops working * correctly, hardware on the board will either perform a hardware * reset (cold boot) or a non-maskable interrupt (NMI) to bring the * system back to a known state. * * The Watch-Dog Timer is controlled by two I/O Ports. * 443 hex - Read - Enable or refresh the Watch-Dog Timer * 043 hex - Read - Disable the Watch-Dog Timer * * To enable the Watch-Dog Timer, a read from I/O port 443h must * be performed. This will enable and activate the countdown timer * which will eventually time out and either reset the CPU or cause * an NMI depending on the setting of a jumper. To ensure that this * reset condition does not occur, the Watch-Dog Timer must be * periodically refreshed by reading the same I/O port 443h. * The Watch-Dog Timer is disabled by reading I/O port 043h. * * The Watch-Dog Timer Time-Out Period is set via jumpers. * It can be 1, 2, 10, 20, 110 or 220 seconds. 
*/ /* * Includes, defines, variables, module parameters, ... */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Includes */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/miscdevice.h> /* For struct miscdevice */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/fs.h> /* For file operations */ #include <linux/ioport.h> /* For io-port access */ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <linux/io.h> /* For inb/outb/... */ /* Module information */ #define DRV_NAME "acquirewdt" #define WATCHDOG_NAME "Acquire WDT" /* There is no way to see what the correct time-out period is */ #define WATCHDOG_HEARTBEAT 0 /* internal variables */ /* the watchdog platform device */ static struct platform_device *acq_platform_device; static unsigned long acq_is_open; static char expect_close; /* module parameters */ /* You must set this - there is no sane way to probe for this board. */ static int wdt_stop = 0x43; module_param(wdt_stop, int, 0); MODULE_PARM_DESC(wdt_stop, "Acquire WDT 'stop' io port (default 0x43)"); /* You must set this - there is no sane way to probe for this board. 
*/ static int wdt_start = 0x443; module_param(wdt_start, int, 0); MODULE_PARM_DESC(wdt_start, "Acquire WDT 'start' io port (default 0x443)"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* * Watchdog Operations */ static void acq_keepalive(void) { /* Write a watchdog value */ inb_p(wdt_start); } static void acq_stop(void) { /* Turn the card off */ inb_p(wdt_stop); } /* * /dev/watchdog handling */ static ssize_t acq_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (count) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character five months ago... */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != count; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* Well, anyhow someone wrote to us, we should return that favour */ acq_keepalive(); } return count; } static long acq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int options, retval = -EINVAL; void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = WATCHDOG_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { if (get_user(options, p)) return -EFAULT; if (options & WDIOS_DISABLECARD) { acq_stop(); retval = 0; } if (options & WDIOS_ENABLECARD) { acq_keepalive(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: acq_keepalive(); return 0; case WDIOC_GETTIMEOUT: return put_user(WATCHDOG_HEARTBEAT, p); default: return -ENOTTY; } } static int acq_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &acq_is_open)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate */ acq_keepalive(); return nonseekable_open(inode, file); } static int acq_close(struct inode *inode, struct file *file) { if (expect_close == 42) { acq_stop(); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); acq_keepalive(); } clear_bit(0, &acq_is_open); expect_close = 0; return 0; } /* * Kernel Interfaces */ static const struct file_operations acq_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = acq_write, .unlocked_ioctl = acq_ioctl, .open = acq_open, .release = acq_close, }; static struct miscdevice acq_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &acq_fops, }; /* * Init & exit routines */ static int __init acq_probe(struct platform_device *dev) { int ret; if (wdt_stop != wdt_start) { if (!request_region(wdt_stop, 1, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", wdt_stop); ret = -EIO; goto out; } } if (!request_region(wdt_start, 1, WATCHDOG_NAME)) { pr_err("I/O address 0x%04x already in use\n", wdt_start); ret = -EIO; goto unreg_stop; } ret = misc_register(&acq_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto unreg_regions; } pr_info("initialized. 
(nowayout=%d)\n", nowayout); return 0; unreg_regions: release_region(wdt_start, 1); unreg_stop: if (wdt_stop != wdt_start) release_region(wdt_stop, 1); out: return ret; } static int acq_remove(struct platform_device *dev) { misc_deregister(&acq_miscdev); release_region(wdt_start, 1); if (wdt_stop != wdt_start) release_region(wdt_stop, 1); return 0; } static void acq_shutdown(struct platform_device *dev) { /* Turn the WDT off if we have a soft shutdown */ acq_stop(); } static struct platform_driver acquirewdt_driver = { .remove = acq_remove, .shutdown = acq_shutdown, .driver = { .name = DRV_NAME, }, }; static int __init acq_init(void) { int err; pr_info("WDT driver for Acquire single board computer initialising\n"); acq_platform_device = platform_device_register_simple(DRV_NAME, -1, NULL, 0); if (IS_ERR(acq_platform_device)) return PTR_ERR(acq_platform_device); err = platform_driver_probe(&acquirewdt_driver, acq_probe); if (err) goto unreg_platform_device; return 0; unreg_platform_device: platform_device_unregister(acq_platform_device); return err; } static void __exit acq_exit(void) { platform_device_unregister(acq_platform_device); platform_driver_unregister(&acquirewdt_driver); pr_info("Watchdog Module Unloaded\n"); } module_init(acq_init); module_exit(acq_exit); MODULE_AUTHOR("David Woodhouse"); MODULE_DESCRIPTION("Acquire Inc. Single Board Computer Watchdog Timer driver"); MODULE_LICENSE("GPL");
gpl-2.0
MISL-EBU-System-SW/lsp-public
sound/oss/kahlua.c
1797
5041
/* * Initialisation code for Cyrix/NatSemi VSA1 softaudio * * (C) Copyright 2003 Red Hat Inc <alan@lxorguk.ukuu.org.uk> * * XpressAudio(tm) is used on the Cyrix MediaGX (now NatSemi Geode) systems. * The older version (VSA1) provides fairly good soundblaster emulation * although there are a couple of bugs: large DMA buffers break record, * and the MPU event handling seems suspect. VSA2 allows the native driver * to control the AC97 audio engine directly and requires a different driver. * * Thanks to National Semiconductor for providing the needed information * on the XpressAudio(tm) internals. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * TO DO: * Investigate whether we can portably support Cognac (5520) in the * same manner. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include "sound_config.h" #include "sb.h" /* * Read a soundblaster compatible mixer register. * In this case we are actually reading an SMI trap * not real hardware. 
*/ static u8 mixer_read(unsigned long io, u8 reg) { outb(reg, io + 4); udelay(20); reg = inb(io + 5); udelay(20); return reg; } static int probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct address_info *hw_config; unsigned long base; void __iomem *mem; unsigned long io; u16 map; u8 irq, dma8, dma16; int oldquiet; extern int sb_be_quiet; base = pci_resource_start(pdev, 0); if(base == 0UL) return 1; mem = ioremap(base, 128); if (!mem) return 1; map = readw(mem + 0x18); /* Read the SMI enables */ iounmap(mem); /* Map bits 0:1 * 0x20 + 0x200 = sb base 2 sb enable 3 adlib enable 5 MPU enable 0x330 6 MPU enable 0x300 The other bits may be used internally so must be masked */ io = 0x220 + 0x20 * (map & 3); if(map & (1<<2)) printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io); else return 1; if(map & (1<<5)) printk(KERN_INFO "kahlua: MPU at 0x300\n"); else if(map & (1<<6)) printk(KERN_INFO "kahlua: MPU at 0x330\n"); irq = mixer_read(io, 0x80) & 0x0F; dma8 = mixer_read(io, 0x81); // printk("IRQ=%x MAP=%x DMA=%x\n", irq, map, dma8); if(dma8 & 0x20) dma16 = 5; else if(dma8 & 0x40) dma16 = 6; else if(dma8 & 0x80) dma16 = 7; else { printk(KERN_ERR "kahlua: No 16bit DMA enabled.\n"); return 1; } if(dma8 & 0x01) dma8 = 0; else if(dma8 & 0x02) dma8 = 1; else if(dma8 & 0x08) dma8 = 3; else { printk(KERN_ERR "kahlua: No 8bit DMA enabled.\n"); return 1; } if(irq & 1) irq = 9; else if(irq & 2) irq = 5; else if(irq & 4) irq = 7; else if(irq & 8) irq = 10; else { printk(KERN_ERR "kahlua: SB IRQ not set.\n"); return 1; } printk(KERN_INFO "kahlua: XpressAudio on IRQ %d, DMA %d, %d\n", irq, dma8, dma16); hw_config = kzalloc(sizeof(struct address_info), GFP_KERNEL); if(hw_config == NULL) { printk(KERN_ERR "kahlua: out of memory.\n"); return 1; } pci_set_drvdata(pdev, hw_config); hw_config->io_base = io; hw_config->irq = irq; hw_config->dma = dma8; hw_config->dma2 = dma16; hw_config->name = "Cyrix XpressAudio"; hw_config->driver_use_1 = SB_NO_MIDI | SB_PCI_IRQ; if 
(!request_region(io, 16, "soundblaster")) goto err_out_free; if(sb_dsp_detect(hw_config, 0, 0, NULL)==0) { printk(KERN_ERR "kahlua: audio not responding.\n"); release_region(io, 16); goto err_out_free; } oldquiet = sb_be_quiet; sb_be_quiet = 1; if(sb_dsp_init(hw_config, THIS_MODULE)) { sb_be_quiet = oldquiet; goto err_out_free; } sb_be_quiet = oldquiet; return 0; err_out_free: kfree(hw_config); return 1; } static void remove_one(struct pci_dev *pdev) { struct address_info *hw_config = pci_get_drvdata(pdev); sb_dsp_unload(hw_config, 0); kfree(hw_config); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Kahlua VSA1 PCI Audio"); MODULE_LICENSE("GPL"); /* * 5530 only. The 5510/5520 decode is different. */ static const struct pci_device_id id_tbl[] = { { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO), 0 }, { } }; MODULE_DEVICE_TABLE(pci, id_tbl); static struct pci_driver kahlua_driver = { .name = "kahlua", .id_table = id_tbl, .probe = probe_one, .remove = remove_one, }; static int __init kahlua_init_module(void) { printk(KERN_INFO "Cyrix Kahlua VSA1 XpressAudio support (c) Copyright 2003 Red Hat Inc\n"); return pci_register_driver(&kahlua_driver); } static void kahlua_cleanup_module(void) { pci_unregister_driver(&kahlua_driver); } module_init(kahlua_init_module); module_exit(kahlua_cleanup_module);
gpl-2.0
sandymanu/manufooty_yu_lp
drivers/pci/pci-acpi.c
1797
10341
/* * File: pci-acpi.c * Purpose: Provide PCI support in ACPI * * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com> * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com> * Copyright (C) 2004 Intel Corp. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/pci-aspm.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include "pci.h" /** * pci_acpi_wake_bus - Wake-up notification handler for root buses. * @handle: ACPI handle of a device the notification is for. * @event: Type of the signaled event. * @context: PCI root bus to wake up devices on. */ static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context) { struct pci_bus *pci_bus = context; if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus) pci_pme_wakeup_bus(pci_bus); } /** * pci_acpi_wake_dev - Wake-up notification handler for PCI devices. * @handle: ACPI handle of a device the notification is for. * @event: Type of the signaled event. * @context: PCI device object to wake up. */ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) { struct pci_dev *pci_dev = context; if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev) return; if (pci_dev->pme_poll) pci_dev->pme_poll = false; if (pci_dev->current_state == PCI_D3cold) { pci_wakeup_event(pci_dev); pm_runtime_resume(&pci_dev->dev); return; } /* Clear PME Status if set. */ if (pci_dev->pme_support) pci_check_pme_status(pci_dev); pci_wakeup_event(pci_dev); pm_runtime_resume(&pci_dev->dev); if (pci_dev->subordinate) pci_pme_wakeup_bus(pci_dev->subordinate); } /** * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. * @dev: ACPI device to add the notifier for. * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. 
*/ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, struct pci_bus *pci_bus) { return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); } /** * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier. * @dev: ACPI device to remove the notifier from. */ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) { return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); } /** * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. * @dev: ACPI device to add the notifier for. * @pci_dev: PCI device to check for the PME status if an event is signaled. */ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, struct pci_dev *pci_dev) { return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); } /** * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier. * @dev: ACPI device to remove the notifier from. */ acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) { return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); } phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) { acpi_status status = AE_NOT_EXIST; unsigned long long mcfg_addr; if (handle) status = acpi_evaluate_integer(handle, METHOD_NAME__CBA, NULL, &mcfg_addr); if (ACPI_FAILURE(status)) return 0; return (phys_addr_t)mcfg_addr; } /* * _SxD returns the D-state with the highest power * (lowest D-state number) supported in the S-state "x". * * If the devices does not have a _PRW * (Power Resources for Wake) supporting system wakeup from "x" * then the OS is free to choose a lower power (higher number * D-state) than the return value from _SxD. * * But if _PRW is enabled at S-state "x", the OS * must not choose a power lower than _SxD -- * unless the device has an _SxW method specifying * the lowest power (highest D-state number) the device * may enter while still able to wake the system. * * ie. 
depending on global OS policy: * * if (_PRW at S-state x) * choose from highest power _SxD to lowest power _SxW * else // no _PRW at S-state x * choose highest power _SxD or any lower power */ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state, d_max; if (pdev->no_d3cold) d_max = ACPI_STATE_D3_HOT; else d_max = ACPI_STATE_D3_COLD; acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max); if (acpi_state < 0) return PCI_POWER_ERROR; switch (acpi_state) { case ACPI_STATE_D0: return PCI_D0; case ACPI_STATE_D1: return PCI_D1; case ACPI_STATE_D2: return PCI_D2; case ACPI_STATE_D3_HOT: return PCI_D3hot; case ACPI_STATE_D3_COLD: return PCI_D3cold; } return PCI_POWER_ERROR; } static bool acpi_pci_power_manageable(struct pci_dev *dev) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); return handle ? acpi_bus_power_manageable(handle) : false; } static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); acpi_handle tmp; static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, [PCI_D2] = ACPI_STATE_D2, [PCI_D3hot] = ACPI_STATE_D3, [PCI_D3cold] = ACPI_STATE_D3 }; int error = -EINVAL; /* If the ACPI device has _EJ0, ignore the device */ if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) return -ENODEV; switch (state) { case PCI_D3cold: if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == PM_QOS_FLAGS_ALL) { error = -EBUSY; break; } case PCI_D0: case PCI_D1: case PCI_D2: case PCI_D3hot: error = acpi_bus_set_power(handle, state_conv[state]); } if (!error) dev_info(&dev->dev, "power state changed by ACPI to %s\n", pci_power_name(state)); return error; } static bool acpi_pci_can_wakeup(struct pci_dev *dev) { acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); return handle ? 
acpi_bus_can_wakeup(handle) : false; } static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) { while (bus->parent) { if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) return; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) acpi_pm_device_sleep_wake(bus->bridge, enable); } static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) { if (acpi_pci_can_wakeup(dev)) return acpi_pm_device_sleep_wake(&dev->dev, enable); acpi_pci_propagate_wakeup_enable(dev->bus, enable); return 0; } static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) { while (bus->parent) { struct pci_dev *bridge = bus->self; if (bridge->pme_interrupt) return; if (!acpi_pm_device_run_wake(&bridge->dev, enable)) return; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) acpi_pm_device_run_wake(bus->bridge, enable); } static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) { /* * Per PCI Express Base Specification Revision 2.0 section * 5.3.3.2 Link Wakeup, platform support is needed for D3cold * waking up to power on the main link even if there is PME * support for D3cold */ if (dev->pme_interrupt && !dev->runtime_d3cold) return 0; if (!acpi_pm_device_run_wake(&dev->dev, enable)) return 0; acpi_pci_propagate_run_wake(dev->bus, enable); return 0; } static struct pci_platform_pm_ops acpi_pci_platform_pm = { .is_manageable = acpi_pci_power_manageable, .set_state = acpi_pci_set_power_state, .choose_state = acpi_pci_choose_state, .sleep_wake = acpi_pci_sleep_wake, .run_wake = acpi_pci_run_wake, }; void acpi_pci_add_bus(struct pci_bus *bus) { acpi_handle handle = NULL; if (bus->bridge) handle = ACPI_HANDLE(bus->bridge); if (acpi_pci_disabled || handle == NULL) return; acpi_pci_slot_enumerate(bus, handle); acpiphp_enumerate_slots(bus, handle); } void acpi_pci_remove_bus(struct pci_bus *bus) { /* * bus->bridge->acpi_node.handle has already been reset to NULL * when acpi_pci_remove_bus() is called, so 
don't check ACPI handle. */ if (acpi_pci_disabled) return; acpiphp_remove_slots(bus); acpi_pci_slot_remove(bus); } /* ACPI bus type */ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) { struct pci_dev *pci_dev = to_pci_dev(dev); bool is_bridge; u64 addr; /* * pci_is_bridge() is not suitable here, because pci_dev->subordinate * is set only after acpi_pci_find_device() has been called for the * given device. */ is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); if (!*handle) return -ENODEV; return 0; } static void pci_acpi_setup(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) return; device_set_wakeup_capable(dev, true); acpi_pci_sleep_wake(pci_dev, false); pci_acpi_add_pm_notifier(adev, pci_dev); if (adev->wakeup.flags.run_wake) device_set_run_wake(dev, true); } static void pci_acpi_cleanup(struct device *dev) { acpi_handle handle = ACPI_HANDLE(dev); struct acpi_device *adev; if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { device_set_wakeup_capable(dev, false); device_set_run_wake(dev, false); pci_acpi_remove_pm_notifier(adev); } } static bool pci_acpi_bus_match(struct device *dev) { return dev->bus == &pci_bus_type; } static struct acpi_bus_type acpi_pci_bus = { .name = "PCI", .match = pci_acpi_bus_match, .find_device = acpi_pci_find_device, .setup = pci_acpi_setup, .cleanup = pci_acpi_cleanup, }; static int __init acpi_pci_init(void) { int ret; if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) { printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); pci_no_msi(); } if (acpi_gbl_FADT.boot_flags & 
ACPI_FADT_NO_ASPM) { printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); pcie_no_aspm(); } ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; pci_set_platform_pm(&acpi_pci_platform_pm); acpi_pci_slot_init(); acpiphp_init(); return 0; } arch_initcall(acpi_pci_init);
gpl-2.0
Hacker432-Y550/android_kernel_huawei_msm8916
net/netfilter/nf_conntrack_proto_gre.c
2565
12576
/* * ip_conntrack_proto_gre.c - Version 3.0 * * Connection tracking protocol helper module for GRE. * * GRE is a generic encapsulation protocol, which is generally not very * suited for NAT, as it has no protocol-specific part as port numbers. * * It has an optional key field, which may help us distinguishing two * connections between the same two hosts. * * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784 * * PPTP is built on top of a modified version of GRE, and has a mandatory * field called "CallID", which serves us for the same purpose as the key * field in plain GRE. * * Documentation about PPTP can be found in RFC 2637 * * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> * * Development of this code funded by Astaro AG (http://www.astaro.com/) * * (C) 2006-2012 Patrick McHardy <kaber@trash.net> */ #include <linux/module.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/seq_file.h> #include <linux/in.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/dst.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_core.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> enum grep_conntrack { GRE_CT_UNREPLIED, GRE_CT_REPLIED, GRE_CT_MAX }; static unsigned int gre_timeouts[GRE_CT_MAX] = { [GRE_CT_UNREPLIED] = 30*HZ, [GRE_CT_REPLIED] = 180*HZ, }; static int proto_gre_net_id __read_mostly; struct netns_proto_gre { struct nf_proto_net nf; rwlock_t keymap_lock; struct list_head keymap_list; unsigned int gre_timeouts[GRE_CT_MAX]; }; static inline struct netns_proto_gre *gre_pernet(struct net *net) { return net_generic(net, proto_gre_net_id); } void nf_ct_gre_keymap_flush(struct net *net) { struct netns_proto_gre *net_gre = gre_pernet(net); struct nf_ct_gre_keymap *km, *tmp; 
write_lock_bh(&net_gre->keymap_lock); list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) { list_del(&km->list); kfree(km); } write_unlock_bh(&net_gre->keymap_lock); } EXPORT_SYMBOL(nf_ct_gre_keymap_flush); static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km, const struct nf_conntrack_tuple *t) { return km->tuple.src.l3num == t->src.l3num && !memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) && !memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) && km->tuple.dst.protonum == t->dst.protonum && km->tuple.dst.u.all == t->dst.u.all; } /* look up the source key for a given tuple */ static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t) { struct netns_proto_gre *net_gre = gre_pernet(net); struct nf_ct_gre_keymap *km; __be16 key = 0; read_lock_bh(&net_gre->keymap_lock); list_for_each_entry(km, &net_gre->keymap_list, list) { if (gre_key_cmpfn(km, t)) { key = km->tuple.src.u.gre.key; break; } } read_unlock_bh(&net_gre->keymap_lock); pr_debug("lookup src key 0x%x for ", key); nf_ct_dump_tuple(t); return key; } /* add a single keymap entry, associate with specified master ct */ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t) { struct net *net = nf_ct_net(ct); struct netns_proto_gre *net_gre = gre_pernet(net); struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); struct nf_ct_gre_keymap **kmp, *km; kmp = &ct_pptp_info->keymap[dir]; if (*kmp) { /* check whether it's a retransmission */ read_lock_bh(&net_gre->keymap_lock); list_for_each_entry(km, &net_gre->keymap_list, list) { if (gre_key_cmpfn(km, t) && km == *kmp) { read_unlock_bh(&net_gre->keymap_lock); return 0; } } read_unlock_bh(&net_gre->keymap_lock); pr_debug("trying to override keymap_%s for ct %p\n", dir == IP_CT_DIR_REPLY ? 
"reply" : "orig", ct); return -EEXIST; } km = kmalloc(sizeof(*km), GFP_ATOMIC); if (!km) return -ENOMEM; memcpy(&km->tuple, t, sizeof(*t)); *kmp = km; pr_debug("adding new entry %p: ", km); nf_ct_dump_tuple(&km->tuple); write_lock_bh(&net_gre->keymap_lock); list_add_tail(&km->list, &net_gre->keymap_list); write_unlock_bh(&net_gre->keymap_lock); return 0; } EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add); /* destroy the keymap entries associated with specified master ct */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); struct netns_proto_gre *net_gre = gre_pernet(net); struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct); enum ip_conntrack_dir dir; pr_debug("entering for ct %p\n", ct); write_lock_bh(&net_gre->keymap_lock); for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) { if (ct_pptp_info->keymap[dir]) { pr_debug("removing %p from list\n", ct_pptp_info->keymap[dir]); list_del(&ct_pptp_info->keymap[dir]->list); kfree(ct_pptp_info->keymap[dir]); ct_pptp_info->keymap[dir] = NULL; } } write_unlock_bh(&net_gre->keymap_lock); } EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy); /* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */ /* invert gre part of tuple */ static bool gre_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->dst.u.gre.key = orig->src.u.gre.key; tuple->src.u.gre.key = orig->dst.u.gre.key; return true; } /* gre hdr info to tuple */ static bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { struct net *net = dev_net(skb->dev ? 
skb->dev : skb_dst(skb)->dev); const struct gre_hdr_pptp *pgrehdr; struct gre_hdr_pptp _pgrehdr; __be16 srckey; const struct gre_hdr *grehdr; struct gre_hdr _grehdr; /* first only delinearize old RFC1701 GRE header */ grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr); if (!grehdr || grehdr->version != GRE_VERSION_PPTP) { /* try to behave like "nf_conntrack_proto_generic" */ tuple->src.u.all = 0; tuple->dst.u.all = 0; return true; } /* PPTP header is variable length, only need up to the call_id field */ pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr); if (!pgrehdr) return true; if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) { pr_debug("GRE_VERSION_PPTP but unknown proto\n"); return false; } tuple->dst.u.gre.key = pgrehdr->call_id; srckey = gre_keymap_lookup(net, tuple); tuple->src.u.gre.key = srckey; return true; } /* print gre part of tuple */ static int gre_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { return seq_printf(s, "srckey=0x%x dstkey=0x%x ", ntohs(tuple->src.u.gre.key), ntohs(tuple->dst.u.gre.key)); } /* print private data for conntrack */ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct) { return seq_printf(s, "timeout=%u, stream_timeout=%u ", (ct->proto.gre.timeout / HZ), (ct->proto.gre.stream_timeout / HZ)); } static unsigned int *gre_get_timeouts(struct net *net) { return gre_pernet(net)->gre_timeouts; } /* Returns verdict for packet, and may modify conntrack */ static int gre_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { /* If we've seen traffic both ways, this is a GRE connection. * Extend timeout. */ if (ct->status & IPS_SEEN_REPLY) { nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.stream_timeout); /* Also, more likely to be important, and not a probe. 
*/ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else nf_ct_refresh_acct(ct, ctinfo, skb, ct->proto.gre.timeout); return NF_ACCEPT; } /* Called when a new connection for this protocol found. */ static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { pr_debug(": "); nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); /* initialize to sane value. Ideally a conntrack helper * (e.g. in case of pptp) is increasing them */ ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED]; ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED]; return true; } /* Called when a conntrack entry has already been removed from the hashes * and is about to be deleted from memory */ static void gre_destroy(struct nf_conn *ct) { struct nf_conn *master = ct->master; pr_debug(" entering\n"); if (!master) pr_debug("no master !?!\n"); else nf_ct_gre_keymap_destroy(master); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeouts = data; struct netns_proto_gre *net_gre = gre_pernet(net); /* set default timeouts for GRE. 
*/ timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED]; timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED]; if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) { timeouts[GRE_CT_UNREPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ; } if (tb[CTA_TIMEOUT_GRE_REPLIED]) { timeouts[GRE_CT_REPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ; } return 0; } static int gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED, htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) || nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED, htonl(timeouts[GRE_CT_REPLIED] / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = { [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ static int gre_init_net(struct net *net, u_int16_t proto) { struct netns_proto_gre *net_gre = gre_pernet(net); int i; rwlock_init(&net_gre->keymap_lock); INIT_LIST_HEAD(&net_gre->keymap_list); for (i = 0; i < GRE_CT_MAX; i++) net_gre->gre_timeouts[i] = gre_timeouts[i]; return 0; } /* protocol helper struct */ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { .l3proto = AF_INET, .l4proto = IPPROTO_GRE, .name = "gre", .pkt_to_tuple = gre_pkt_to_tuple, .invert_tuple = gre_invert_tuple, .print_tuple = gre_print_tuple, .print_conntrack = gre_print_conntrack, .get_timeouts = gre_get_timeouts, .packet = gre_packet, .new = gre_new, .destroy = gre_destroy, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = 
gre_timeout_nlattr_to_obj, .obj_to_nlattr = gre_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_GRE_MAX, .obj_size = sizeof(unsigned int) * GRE_CT_MAX, .nla_policy = gre_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .net_id = &proto_gre_net_id, .init_net = gre_init_net, }; static int proto_gre_net_init(struct net *net) { int ret = 0; ret = nf_ct_l4proto_pernet_register(net, &nf_conntrack_l4proto_gre4); if (ret < 0) pr_err("nf_conntrack_gre4: pernet registration failed.\n"); return ret; } static void proto_gre_net_exit(struct net *net) { nf_ct_l4proto_pernet_unregister(net, &nf_conntrack_l4proto_gre4); nf_ct_gre_keymap_flush(net); } static struct pernet_operations proto_gre_net_ops = { .init = proto_gre_net_init, .exit = proto_gre_net_exit, .id = &proto_gre_net_id, .size = sizeof(struct netns_proto_gre), }; static int __init nf_ct_proto_gre_init(void) { int ret; ret = register_pernet_subsys(&proto_gre_net_ops); if (ret < 0) goto out_pernet; ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); if (ret < 0) goto out_gre4; return 0; out_gre4: unregister_pernet_subsys(&proto_gre_net_ops); out_pernet: return ret; } static void __exit nf_ct_proto_gre_fini(void) { nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); unregister_pernet_subsys(&proto_gre_net_ops); } module_init(nf_ct_proto_gre_init); module_exit(nf_ct_proto_gre_fini); MODULE_LICENSE("GPL");
gpl-2.0
ShevT/kernel_asus_tf101g_eos4
arch/arm/mach-integrator/impd1.c
3077
10567
/* * linux/arch/arm/mach-integrator/impd1.c * * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file provides the core support for the IM-PD1 module. * * Module / boot parameters. * lmid=n impd1.lmid=n - set the logic module position in stack to 'n' */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/amba/bus.h> #include <linux/amba/clcd.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/clkdev.h> #include <asm/hardware/icst.h> #include <mach/lm.h> #include <mach/impd1.h> #include <asm/sizes.h> static int module_id; module_param_named(lmid, module_id, int, 0444); MODULE_PARM_DESC(lmid, "logic module stack position"); struct impd1_module { void __iomem *base; struct clk vcos[2]; struct clk_lookup *clks[3]; }; static const struct icst_params impd1_vco_params = { .ref = 24000000, /* 24 MHz */ .vco_max = ICST525_VCO_MAX_3V, .vco_min = ICST525_VCO_MIN, .vd_min = 12, .vd_max = 519, .rd_min = 3, .rd_max = 120, .s2div = icst525_s2div, .idx2s = icst525_idx2s, }; static void impd1_setvco(struct clk *clk, struct icst_vco vco) { struct impd1_module *impd1 = clk->data; u32 val = vco.v | (vco.r << 9) | (vco.s << 16); writel(0xa05f, impd1->base + IMPD1_LOCK); writel(val, clk->vcoreg); writel(0, impd1->base + IMPD1_LOCK); #ifdef DEBUG vco.v = val & 0x1ff; vco.r = (val >> 9) & 0x7f; vco.s = (val >> 16) & 7; pr_debug("IM-PD1: VCO%d clock is %ld Hz\n", vconr, icst525_hz(&impd1_vco_params, vco)); #endif } static const struct clk_ops impd1_clk_ops = { .round = icst_clk_round, .set = icst_clk_set, .setvco = impd1_setvco, }; void impd1_tweak_control(struct device *dev, u32 mask, u32 val) { struct impd1_module *impd1 = dev_get_drvdata(dev); u32 
cur; val &= mask; cur = readl(impd1->base + IMPD1_CTRL) & ~mask; writel(cur | val, impd1->base + IMPD1_CTRL); } EXPORT_SYMBOL(impd1_tweak_control); /* * CLCD support */ #define PANEL PROSPECTOR #define LTM10C209 1 #define PROSPECTOR 2 #define SVGA 3 #define VGA 4 #if PANEL == VGA #define PANELTYPE vga static struct clcd_panel vga = { .mode = { .name = "VGA", .refresh = 60, .xres = 640, .yres = 480, .pixclock = 39721, .left_margin = 40, .right_margin = 24, .upper_margin = 32, .lower_margin = 11, .hsync_len = 96, .vsync_len = 2, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, .width = -1, .height = -1, .tim2 = TIM2_BCD | TIM2_IPC, .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1), .caps = CLCD_CAP_5551, .connector = IMPD1_CTRL_DISP_VGA, .bpp = 16, .grayscale = 0, }; #elif PANEL == SVGA #define PANELTYPE svga static struct clcd_panel svga = { .mode = { .name = "SVGA", .refresh = 0, .xres = 800, .yres = 600, .pixclock = 27778, .left_margin = 20, .right_margin = 20, .upper_margin = 5, .lower_margin = 5, .hsync_len = 164, .vsync_len = 62, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, .width = -1, .height = -1, .tim2 = TIM2_BCD, .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1), .connector = IMPD1_CTRL_DISP_VGA, .caps = CLCD_CAP_5551, .bpp = 16, .grayscale = 0, }; #elif PANEL == PROSPECTOR #define PANELTYPE prospector static struct clcd_panel prospector = { .mode = { .name = "PROSPECTOR", .refresh = 0, .xres = 640, .yres = 480, .pixclock = 40000, .left_margin = 33, .right_margin = 64, .upper_margin = 36, .lower_margin = 7, .hsync_len = 64, .vsync_len = 25, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, .width = -1, .height = -1, .tim2 = TIM2_BCD, .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1), .caps = CLCD_CAP_5551, .fixedtimings = 1, .connector = IMPD1_CTRL_DISP_LCD, .bpp = 16, .grayscale = 0, }; #elif PANEL == LTM10C209 #define PANELTYPE ltm10c209 /* * Untested. 
*/ static struct clcd_panel ltm10c209 = { .mode = { .name = "LTM10C209", .refresh = 0, .xres = 640, .yres = 480, .pixclock = 40000, .left_margin = 20, .right_margin = 20, .upper_margin = 19, .lower_margin = 19, .hsync_len = 20, .vsync_len = 10, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, .width = -1, .height = -1, .tim2 = TIM2_BCD, .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1), .caps = CLCD_CAP_5551, .fixedtimings = 1, .connector = IMPD1_CTRL_DISP_LCD, .bpp = 16, .grayscale = 0, }; #endif /* * Disable all display connectors on the interface module. */ static void impd1fb_clcd_disable(struct clcd_fb *fb) { impd1_tweak_control(fb->dev->dev.parent, IMPD1_CTRL_DISP_MASK, 0); } /* * Enable the relevant connector on the interface module. */ static void impd1fb_clcd_enable(struct clcd_fb *fb) { impd1_tweak_control(fb->dev->dev.parent, IMPD1_CTRL_DISP_MASK, fb->panel->connector | IMPD1_CTRL_DISP_ENABLE); } static int impd1fb_clcd_setup(struct clcd_fb *fb) { unsigned long framebase = fb->dev->res.start + 0x01000000; unsigned long framesize = SZ_1M; int ret = 0; fb->panel = &PANELTYPE; if (!request_mem_region(framebase, framesize, "clcd framebuffer")) { printk(KERN_ERR "IM-PD1: unable to reserve framebuffer\n"); return -EBUSY; } fb->fb.screen_base = ioremap(framebase, framesize); if (!fb->fb.screen_base) { printk(KERN_ERR "IM-PD1: unable to map framebuffer\n"); ret = -ENOMEM; goto free_buffer; } fb->fb.fix.smem_start = framebase; fb->fb.fix.smem_len = framesize; return 0; free_buffer: release_mem_region(framebase, framesize); return ret; } static int impd1fb_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) { unsigned long start, size; start = vma->vm_pgoff + (fb->fb.fix.smem_start >> PAGE_SHIFT); size = vma->vm_end - vma->vm_start; return remap_pfn_range(vma, vma->vm_start, start, size, vma->vm_page_prot); } static void impd1fb_clcd_remove(struct clcd_fb *fb) { iounmap(fb->fb.screen_base); 
release_mem_region(fb->fb.fix.smem_start, fb->fb.fix.smem_len); } static struct clcd_board impd1_clcd_data = { .name = "IM-PD/1", .caps = CLCD_CAP_5551 | CLCD_CAP_888, .check = clcdfb_check, .decode = clcdfb_decode, .disable = impd1fb_clcd_disable, .enable = impd1fb_clcd_enable, .setup = impd1fb_clcd_setup, .mmap = impd1fb_clcd_mmap, .remove = impd1fb_clcd_remove, }; struct impd1_device { unsigned long offset; unsigned int irq[2]; unsigned int id; void *platform_data; }; static struct impd1_device impd1_devs[] = { { .offset = 0x03000000, .id = 0x00041190, }, { .offset = 0x00100000, .irq = { 1 }, .id = 0x00141011, }, { .offset = 0x00200000, .irq = { 2 }, .id = 0x00141011, }, { .offset = 0x00300000, .irq = { 3 }, .id = 0x00041022, }, { .offset = 0x00400000, .irq = { 4 }, .id = 0x00041061, }, { .offset = 0x00500000, .irq = { 5 }, .id = 0x00041061, }, { .offset = 0x00600000, .irq = { 6 }, .id = 0x00041130, }, { .offset = 0x00700000, .irq = { 7, 8 }, .id = 0x00041181, }, { .offset = 0x00800000, .irq = { 9 }, .id = 0x00041041, }, { .offset = 0x01000000, .irq = { 11 }, .id = 0x00041110, .platform_data = &impd1_clcd_data, } }; static struct clk fixed_14745600 = { .rate = 14745600, }; static int impd1_probe(struct lm_device *dev) { struct impd1_module *impd1; int i, ret; if (dev->id != module_id) return -EINVAL; if (!request_mem_region(dev->resource.start, SZ_4K, "LM registers")) return -EBUSY; impd1 = kzalloc(sizeof(struct impd1_module), GFP_KERNEL); if (!impd1) { ret = -ENOMEM; goto release_lm; } impd1->base = ioremap(dev->resource.start, SZ_4K); if (!impd1->base) { ret = -ENOMEM; goto free_impd1; } lm_set_drvdata(dev, impd1); printk("IM-PD1 found at 0x%08lx\n", (unsigned long)dev->resource.start); for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) { impd1->vcos[i].ops = &impd1_clk_ops, impd1->vcos[i].owner = THIS_MODULE, impd1->vcos[i].params = &impd1_vco_params, impd1->vcos[i].data = impd1; } impd1->vcos[0].vcoreg = impd1->base + IMPD1_OSC1; impd1->vcos[1].vcoreg = 
impd1->base + IMPD1_OSC2; impd1->clks[0] = clkdev_alloc(&impd1->vcos[0], NULL, "lm%x:01000", dev->id); impd1->clks[1] = clkdev_alloc(&fixed_14745600, NULL, "lm%x:00100", dev->id); impd1->clks[2] = clkdev_alloc(&fixed_14745600, NULL, "lm%x:00200", dev->id); for (i = 0; i < ARRAY_SIZE(impd1->clks); i++) clkdev_add(impd1->clks[i]); for (i = 0; i < ARRAY_SIZE(impd1_devs); i++) { struct impd1_device *idev = impd1_devs + i; struct amba_device *d; unsigned long pc_base; pc_base = dev->resource.start + idev->offset; d = kzalloc(sizeof(struct amba_device), GFP_KERNEL); if (!d) continue; dev_set_name(&d->dev, "lm%x:%5.5lx", dev->id, idev->offset >> 12); d->dev.parent = &dev->dev; d->res.start = dev->resource.start + idev->offset; d->res.end = d->res.start + SZ_4K - 1; d->res.flags = IORESOURCE_MEM; d->irq[0] = dev->irq; d->irq[1] = dev->irq; d->periphid = idev->id; d->dev.platform_data = idev->platform_data; ret = amba_device_register(d, &dev->resource); if (ret) { dev_err(&d->dev, "unable to register device: %d\n", ret); kfree(d); } } return 0; free_impd1: if (impd1 && impd1->base) iounmap(impd1->base); kfree(impd1); release_lm: release_mem_region(dev->resource.start, SZ_4K); return ret; } static int impd1_remove_one(struct device *dev, void *data) { device_unregister(dev); return 0; } static void impd1_remove(struct lm_device *dev) { struct impd1_module *impd1 = lm_get_drvdata(dev); int i; device_for_each_child(&dev->dev, NULL, impd1_remove_one); for (i = 0; i < ARRAY_SIZE(impd1->clks); i++) clkdev_drop(impd1->clks[i]); lm_set_drvdata(dev, NULL); iounmap(impd1->base); kfree(impd1); release_mem_region(dev->resource.start, SZ_4K); } static struct lm_driver impd1_driver = { .drv = { .name = "impd1", }, .probe = impd1_probe, .remove = impd1_remove, }; static int __init impd1_init(void) { return lm_driver_register(&impd1_driver); } static void __exit impd1_exit(void) { lm_driver_unregister(&impd1_driver); } module_init(impd1_init); module_exit(impd1_exit); 
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Integrator/IM-PD1 logic module core driver"); MODULE_AUTHOR("Deep Blue Solutions Ltd");
gpl-2.0
aeroevan/htc_kernel_msm7x30
drivers/w1/masters/ds2490.c
3589
24284
/* * dscore.c * * Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/usb.h> #include <linux/slab.h> #include "../w1_int.h" #include "../w1.h" /* COMMAND TYPE CODES */ #define CONTROL_CMD 0x00 #define COMM_CMD 0x01 #define MODE_CMD 0x02 /* CONTROL COMMAND CODES */ #define CTL_RESET_DEVICE 0x0000 #define CTL_START_EXE 0x0001 #define CTL_RESUME_EXE 0x0002 #define CTL_HALT_EXE_IDLE 0x0003 #define CTL_HALT_EXE_DONE 0x0004 #define CTL_FLUSH_COMM_CMDS 0x0007 #define CTL_FLUSH_RCV_BUFFER 0x0008 #define CTL_FLUSH_XMT_BUFFER 0x0009 #define CTL_GET_COMM_CMDS 0x000A /* MODE COMMAND CODES */ #define MOD_PULSE_EN 0x0000 #define MOD_SPEED_CHANGE_EN 0x0001 #define MOD_1WIRE_SPEED 0x0002 #define MOD_STRONG_PU_DURATION 0x0003 #define MOD_PULLDOWN_SLEWRATE 0x0004 #define MOD_PROG_PULSE_DURATION 0x0005 #define MOD_WRITE1_LOWTIME 0x0006 #define MOD_DSOW0_TREC 0x0007 /* COMMUNICATION COMMAND CODES */ #define COMM_ERROR_ESCAPE 0x0601 #define COMM_SET_DURATION 0x0012 #define COMM_BIT_IO 0x0020 #define COMM_PULSE 0x0030 #define COMM_1_WIRE_RESET 0x0042 #define COMM_BYTE_IO 0x0052 #define COMM_MATCH_ACCESS 0x0064 #define COMM_BLOCK_IO 0x0074 #define COMM_READ_STRAIGHT 0x0080 #define 
COMM_DO_RELEASE 0x6092 #define COMM_SET_PATH 0x00A2 #define COMM_WRITE_SRAM_PAGE 0x00B2 #define COMM_WRITE_EPROM 0x00C4 #define COMM_READ_CRC_PROT_PAGE 0x00D4 #define COMM_READ_REDIRECT_PAGE_CRC 0x21E4 #define COMM_SEARCH_ACCESS 0x00F4 /* Communication command bits */ #define COMM_TYPE 0x0008 #define COMM_SE 0x0008 #define COMM_D 0x0008 #define COMM_Z 0x0008 #define COMM_CH 0x0008 #define COMM_SM 0x0008 #define COMM_R 0x0008 #define COMM_IM 0x0001 #define COMM_PS 0x4000 #define COMM_PST 0x4000 #define COMM_CIB 0x4000 #define COMM_RTS 0x4000 #define COMM_DT 0x2000 #define COMM_SPU 0x1000 #define COMM_F 0x0800 #define COMM_NTF 0x0400 #define COMM_ICP 0x0200 #define COMM_RST 0x0100 #define PULSE_PROG 0x01 #define PULSE_SPUE 0x02 #define BRANCH_MAIN 0xCC #define BRANCH_AUX 0x33 /* Status flags */ #define ST_SPUA 0x01 /* Strong Pull-up is active */ #define ST_PRGA 0x02 /* 12V programming pulse is being generated */ #define ST_12VP 0x04 /* external 12V programming voltage is present */ #define ST_PMOD 0x08 /* DS2490 powered from USB and external sources */ #define ST_HALT 0x10 /* DS2490 is currently halted */ #define ST_IDLE 0x20 /* DS2490 is currently idle */ #define ST_EPOF 0x80 /* Result Register flags */ #define RR_DETECT 0xA5 /* New device detected */ #define RR_NRS 0x01 /* Reset no presence or ... 
*/ #define RR_SH 0x02 /* short on reset or set path */ #define RR_APP 0x04 /* alarming presence on reset */ #define RR_VPP 0x08 /* 12V expected not seen */ #define RR_CMP 0x10 /* compare error */ #define RR_CRC 0x20 /* CRC error detected */ #define RR_RDP 0x40 /* redirected page */ #define RR_EOS 0x80 /* end of search error */ #define SPEED_NORMAL 0x00 #define SPEED_FLEXIBLE 0x01 #define SPEED_OVERDRIVE 0x02 #define NUM_EP 4 #define EP_CONTROL 0 #define EP_STATUS 1 #define EP_DATA_OUT 2 #define EP_DATA_IN 3 struct ds_device { struct list_head ds_entry; struct usb_device *udev; struct usb_interface *intf; int ep[NUM_EP]; /* Strong PullUp * 0: pullup not active, else duration in milliseconds */ int spu_sleep; /* spu_bit contains COMM_SPU or 0 depending on if the strong pullup * should be active or not for writes. */ u16 spu_bit; struct w1_bus_master master; }; struct ds_status { u8 enable; u8 speed; u8 pullup_dur; u8 ppuls_dur; u8 pulldown_slew; u8 write1_time; u8 write0_time; u8 reserved0; u8 status; u8 command0; u8 command1; u8 command_buffer_status; u8 data_out_buffer_status; u8 data_in_buffer_status; u8 reserved1; u8 reserved2; }; static struct usb_device_id ds_id_table [] = { { USB_DEVICE(0x04fa, 0x2490) }, { }, }; MODULE_DEVICE_TABLE(usb, ds_id_table); static int ds_probe(struct usb_interface *, const struct usb_device_id *); static void ds_disconnect(struct usb_interface *); static int ds_send_control(struct ds_device *, u16, u16); static int ds_send_control_cmd(struct ds_device *, u16, u16); static LIST_HEAD(ds_devices); static DEFINE_MUTEX(ds_mutex); static struct usb_driver ds_driver = { .name = "DS9490R", .probe = ds_probe, .disconnect = ds_disconnect, .id_table = ds_id_table, }; static int ds_send_control_cmd(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), CONTROL_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send command control 
message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_send_control_mode(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), MODE_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send mode control message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_send_control(struct ds_device *dev, u16 value, u16 index) { int err; err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, dev->ep[EP_CONTROL]), COMM_CMD, 0x40, value, index, NULL, 0, 1000); if (err < 0) { printk(KERN_ERR "Failed to send control message %x.%x: err=%d.\n", value, index, err); return err; } return err; } static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st, unsigned char *buf, int size) { int count, err; memset(st, 0, sizeof(*st)); count = 0; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100); if (err < 0) { printk(KERN_ERR "Failed to read 1-wire data from 0x%x: err=%d.\n", dev->ep[EP_STATUS], err); return err; } if (count >= sizeof(*st)) memcpy(st, buf, sizeof(*st)); return count; } static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off) { printk(KERN_INFO "%45s: %8x\n", str, buf[off]); } static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count) { int i; printk(KERN_INFO "0x%x: count=%d, status: ", dev->ep[EP_STATUS], count); for (i=0; i<count; ++i) printk("%02x ", buf[i]); printk(KERN_INFO "\n"); if (count >= 16) { ds_print_msg(buf, "enable flag", 0); ds_print_msg(buf, "1-wire speed", 1); ds_print_msg(buf, "strong pullup duration", 2); ds_print_msg(buf, "programming pulse duration", 3); ds_print_msg(buf, "pulldown slew rate control", 4); ds_print_msg(buf, "write-1 low time", 5); ds_print_msg(buf, "data sample offset/write-0 recovery time", 6); ds_print_msg(buf, "reserved (test register)", 
7); ds_print_msg(buf, "device status flags", 8); ds_print_msg(buf, "communication command byte 1", 9); ds_print_msg(buf, "communication command byte 2", 10); ds_print_msg(buf, "communication command buffer status", 11); ds_print_msg(buf, "1-wire data output buffer status", 12); ds_print_msg(buf, "1-wire data input buffer status", 13); ds_print_msg(buf, "reserved", 14); ds_print_msg(buf, "reserved", 15); } for (i = 16; i < count; ++i) { if (buf[i] == RR_DETECT) { ds_print_msg(buf, "new device detect", i); continue; } ds_print_msg(buf, "Result Register Value: ", i); if (buf[i] & RR_NRS) printk(KERN_INFO "NRS: Reset no presence or ...\n"); if (buf[i] & RR_SH) printk(KERN_INFO "SH: short on reset or set path\n"); if (buf[i] & RR_APP) printk(KERN_INFO "APP: alarming presence on reset\n"); if (buf[i] & RR_VPP) printk(KERN_INFO "VPP: 12V expected not seen\n"); if (buf[i] & RR_CMP) printk(KERN_INFO "CMP: compare error\n"); if (buf[i] & RR_CRC) printk(KERN_INFO "CRC: CRC error detected\n"); if (buf[i] & RR_RDP) printk(KERN_INFO "RDP: redirected page\n"); if (buf[i] & RR_EOS) printk(KERN_INFO "EOS: end of search error\n"); } } static void ds_reset_device(struct ds_device *dev) { ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); /* Always allow strong pullup which allow individual writes to use * the strong pullup. */ if (ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_SPUE)) printk(KERN_ERR "ds_reset_device: " "Error allowing strong pullup\n"); /* Chip strong pullup time was cleared. */ if (dev->spu_sleep) { /* lower 4 bits are 0, see ds_set_pullup */ u8 del = dev->spu_sleep>>4; if (ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del)) printk(KERN_ERR "ds_reset_device: " "Error setting duration\n"); } } static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size) { int count, err; struct ds_status st; /* Careful on size. If size is less than what is available in * the input buffer, the device fails the bulk transfer and * clears the input buffer. 
It could read the maximum size of * the data buffer, but then do you return the first, last, or * some set of the middle size bytes? As long as the rest of * the code is correct there will be size bytes waiting. A * call to ds_wait_status will wait until the device is idle * and any data to be received would have been available. */ count = 0; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]), buf, size, &count, 1000); if (err < 0) { u8 buf[0x20]; int count; printk(KERN_INFO "Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]); usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN])); count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); ds_dump_status(dev, buf, count); return err; } #if 0 { int i; printk("%s: count=%d: ", __func__, count); for (i=0; i<count; ++i) printk("%02x ", buf[i]); printk("\n"); } #endif return count; } static int ds_send_data(struct ds_device *dev, unsigned char *buf, int len) { int count, err; count = 0; err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->ep[EP_DATA_OUT]), buf, len, &count, 1000); if (err < 0) { printk(KERN_ERR "Failed to write 1-wire data to ep0x%x: " "err=%d.\n", dev->ep[EP_DATA_OUT], err); return err; } return err; } #if 0 int ds_stop_pulse(struct ds_device *dev, int limit) { struct ds_status st; int count = 0, err = 0; u8 buf[0x20]; do { err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0); if (err) break; err = ds_send_control(dev, CTL_RESUME_EXE, 0); if (err) break; err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf)); if (err) break; if ((st.status & ST_SPUA) == 0) { err = ds_send_control_mode(dev, MOD_PULSE_EN, 0); if (err) break; } } while(++count < limit); return err; } int ds_detect(struct ds_device *dev, struct ds_status *st) { int err; err = ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0); if (err) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, 0); if (err) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM | COMM_TYPE, 0x40); 
if (err) return err; err = ds_send_control_mode(dev, MOD_PULSE_EN, PULSE_PROG); if (err) return err; err = ds_dump_status(dev, st); return err; } #endif /* 0 */ static int ds_wait_status(struct ds_device *dev, struct ds_status *st) { u8 buf[0x20]; int err, count = 0; do { err = ds_recv_status_nodump(dev, st, buf, sizeof(buf)); #if 0 if (err >= 0) { int i; printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err); for (i=0; i<err; ++i) printk("%02x ", buf[i]); printk("\n"); } #endif } while (!(buf[0x08] & ST_IDLE) && !(err < 0) && ++count < 100); if (err >= 16 && st->status & ST_EPOF) { printk(KERN_INFO "Resetting device after ST_EPOF.\n"); ds_reset_device(dev); /* Always dump the device status. */ count = 101; } /* Dump the status for errors or if there is extended return data. * The extended status includes new device detection (maybe someone * can do something with it). */ if (err > 16 || count >= 100 || err < 0) ds_dump_status(dev, buf, err); /* Extended data isn't an error. Well, a short is, but the dump * would have already told the user that and we can't do anything * about it in software anyway. */ if (count >= 100 || err < 0) return -1; else return 0; } static int ds_reset(struct ds_device *dev) { int err; /* Other potentionally interesting flags for reset. * * COMM_NTF: Return result register feedback. This could be used to * detect some conditions such as short, alarming presence, or * detect if a new device was detected. * * COMM_SE which allows SPEED_NORMAL, SPEED_FLEXIBLE, SPEED_OVERDRIVE: * Select the data transfer rate. 
*/ err = ds_send_control(dev, COMM_1_WIRE_RESET | COMM_IM, SPEED_NORMAL); if (err) return err; return 0; } #if 0 static int ds_set_speed(struct ds_device *dev, int speed) { int err; if (speed != SPEED_NORMAL && speed != SPEED_FLEXIBLE && speed != SPEED_OVERDRIVE) return -EINVAL; if (speed != SPEED_OVERDRIVE) speed = SPEED_FLEXIBLE; speed &= 0xff; err = ds_send_control_mode(dev, MOD_1WIRE_SPEED, speed); if (err) return err; return err; } #endif /* 0 */ static int ds_set_pullup(struct ds_device *dev, int delay) { int err = 0; u8 del = 1 + (u8)(delay >> 4); /* Just storing delay would not get the trunication and roundup. */ int ms = del<<4; /* Enable spu_bit if a delay is set. */ dev->spu_bit = delay ? COMM_SPU : 0; /* If delay is zero, it has already been disabled, if the time is * the same as the hardware was last programmed to, there is also * nothing more to do. Compare with the recalculated value ms * rather than del or delay which can have a different value. */ if (delay == 0 || ms == dev->spu_sleep) return err; err = ds_send_control(dev, COMM_SET_DURATION | COMM_IM, del); if (err) return err; dev->spu_sleep = ms; return err; } static int ds_touch_bit(struct ds_device *dev, u8 bit, u8 *tbit) { int err; struct ds_status st; err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | (bit ? COMM_D : 0), 0); if (err) return err; ds_wait_status(dev, &st); err = ds_recv_data(dev, tbit, sizeof(*tbit)); if (err < 0) return err; return 0; } #if 0 static int ds_write_bit(struct ds_device *dev, u8 bit) { int err; struct ds_status st; /* Set COMM_ICP to write without a readback. Note, this will * produce one time slot, a down followed by an up with COMM_D * only determing the timing. */ err = ds_send_control(dev, COMM_BIT_IO | COMM_IM | COMM_ICP | (bit ? 
COMM_D : 0), 0); if (err) return err; ds_wait_status(dev, &st); return 0; } #endif static int ds_write_byte(struct ds_device *dev, u8 byte) { int err; struct ds_status st; u8 rbyte; err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte); if (err) return err; if (dev->spu_bit) msleep(dev->spu_sleep); err = ds_wait_status(dev, &st); if (err) return err; err = ds_recv_data(dev, &rbyte, sizeof(rbyte)); if (err < 0) return err; return !(byte == rbyte); } static int ds_read_byte(struct ds_device *dev, u8 *byte) { int err; struct ds_status st; err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM , 0xff); if (err) return err; ds_wait_status(dev, &st); err = ds_recv_data(dev, byte, sizeof(*byte)); if (err < 0) return err; return 0; } static int ds_read_block(struct ds_device *dev, u8 *buf, int len) { struct ds_status st; int err; if (len > 64*1024) return -E2BIG; memset(buf, 0xFF, len); err = ds_send_data(dev, buf, len); if (err < 0) return err; err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM, len); if (err) return err; ds_wait_status(dev, &st); memset(buf, 0x00, len); err = ds_recv_data(dev, buf, len); return err; } static int ds_write_block(struct ds_device *dev, u8 *buf, int len) { int err; struct ds_status st; err = ds_send_data(dev, buf, len); if (err < 0) return err; err = ds_send_control(dev, COMM_BLOCK_IO | COMM_IM | dev->spu_bit, len); if (err) return err; if (dev->spu_bit) msleep(dev->spu_sleep); ds_wait_status(dev, &st); err = ds_recv_data(dev, buf, len); if (err < 0) return err; return !(err == len); } #if 0 static int ds_search(struct ds_device *dev, u64 init, u64 *buf, u8 id_number, int conditional_search) { int err; u16 value, index; struct ds_status st; memset(buf, 0, sizeof(buf)); err = ds_send_data(ds_dev, (unsigned char *)&init, 8); if (err) return err; ds_wait_status(ds_dev, &st); value = COMM_SEARCH_ACCESS | COMM_IM | COMM_SM | COMM_F | COMM_RTS; index = (conditional_search ? 
0xEC : 0xF0) | (id_number << 8); err = ds_send_control(ds_dev, value, index); if (err) return err; ds_wait_status(ds_dev, &st); err = ds_recv_data(ds_dev, (unsigned char *)buf, 8*id_number); if (err < 0) return err; return err/8; } static int ds_match_access(struct ds_device *dev, u64 init) { int err; struct ds_status st; err = ds_send_data(dev, (unsigned char *)&init, sizeof(init)); if (err) return err; ds_wait_status(dev, &st); err = ds_send_control(dev, COMM_MATCH_ACCESS | COMM_IM | COMM_RST, 0x0055); if (err) return err; ds_wait_status(dev, &st); return 0; } static int ds_set_path(struct ds_device *dev, u64 init) { int err; struct ds_status st; u8 buf[9]; memcpy(buf, &init, 8); buf[8] = BRANCH_MAIN; err = ds_send_data(dev, buf, sizeof(buf)); if (err) return err; ds_wait_status(dev, &st); err = ds_send_control(dev, COMM_SET_PATH | COMM_IM | COMM_RST, 0); if (err) return err; ds_wait_status(dev, &st); return 0; } #endif /* 0 */ static u8 ds9490r_touch_bit(void *data, u8 bit) { u8 ret; struct ds_device *dev = data; if (ds_touch_bit(dev, bit, &ret)) return 0; return ret; } #if 0 static void ds9490r_write_bit(void *data, u8 bit) { struct ds_device *dev = data; ds_write_bit(dev, bit); } static u8 ds9490r_read_bit(void *data) { struct ds_device *dev = data; int err; u8 bit = 0; err = ds_touch_bit(dev, 1, &bit); if (err) return 0; return bit & 1; } #endif static void ds9490r_write_byte(void *data, u8 byte) { struct ds_device *dev = data; ds_write_byte(dev, byte); } static u8 ds9490r_read_byte(void *data) { struct ds_device *dev = data; int err; u8 byte = 0; err = ds_read_byte(dev, &byte); if (err) return 0; return byte; } static void ds9490r_write_block(void *data, const u8 *buf, int len) { struct ds_device *dev = data; ds_write_block(dev, (u8 *)buf, len); } static u8 ds9490r_read_block(void *data, u8 *buf, int len) { struct ds_device *dev = data; int err; err = ds_read_block(dev, buf, len); if (err < 0) return 0; return len; } static u8 ds9490r_reset(void *data) { 
struct ds_device *dev = data; int err; err = ds_reset(dev); if (err) return 1; return 0; } static u8 ds9490r_set_pullup(void *data, int delay) { struct ds_device *dev = data; if (ds_set_pullup(dev, delay)) return 1; return 0; } static int ds_w1_init(struct ds_device *dev) { memset(&dev->master, 0, sizeof(struct w1_bus_master)); /* Reset the device as it can be in a bad state. * This is necessary because a block write will wait for data * to be placed in the output buffer and block any later * commands which will keep accumulating and the device will * not be idle. Another case is removing the ds2490 module * while a bus search is in progress, somehow a few commands * get through, but the input transfers fail leaving data in * the input buffer. This will cause the next read to fail * see the note in ds_recv_data. */ ds_reset_device(dev); dev->master.data = dev; dev->master.touch_bit = &ds9490r_touch_bit; /* read_bit and write_bit in w1_bus_master are expected to set and * sample the line level. For write_bit that means it is expected to * set it to that value and leave it there. ds2490 only supports an * individual time slot at the lowest level. The requirement from * pulling the bus state down to reading the state is 15us, something * that isn't realistic on the USB bus anyway. 
dev->master.read_bit = &ds9490r_read_bit; dev->master.write_bit = &ds9490r_write_bit; */ dev->master.read_byte = &ds9490r_read_byte; dev->master.write_byte = &ds9490r_write_byte; dev->master.read_block = &ds9490r_read_block; dev->master.write_block = &ds9490r_write_block; dev->master.reset_bus = &ds9490r_reset; dev->master.set_pullup = &ds9490r_set_pullup; return w1_add_master_device(&dev->master); } static void ds_w1_fini(struct ds_device *dev) { w1_remove_master_device(&dev->master); } static int ds_probe(struct usb_interface *intf, const struct usb_device_id *udev_id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct usb_host_interface *iface_desc; struct ds_device *dev; int i, err; dev = kmalloc(sizeof(struct ds_device), GFP_KERNEL); if (!dev) { printk(KERN_INFO "Failed to allocate new DS9490R structure.\n"); return -ENOMEM; } dev->spu_sleep = 0; dev->spu_bit = 0; dev->udev = usb_get_dev(udev); if (!dev->udev) { err = -ENOMEM; goto err_out_free; } memset(dev->ep, 0, sizeof(dev->ep)); usb_set_intfdata(intf, dev); err = usb_set_interface(dev->udev, intf->altsetting[0].desc.bInterfaceNumber, 3); if (err) { printk(KERN_ERR "Failed to set alternative setting 3 for %d interface: err=%d.\n", intf->altsetting[0].desc.bInterfaceNumber, err); goto err_out_clear; } err = usb_reset_configuration(dev->udev); if (err) { printk(KERN_ERR "Failed to reset configuration: err=%d.\n", err); goto err_out_clear; } iface_desc = &intf->altsetting[0]; if (iface_desc->desc.bNumEndpoints != NUM_EP-1) { printk(KERN_INFO "Num endpoints=%d. It is not DS9490R.\n", iface_desc->desc.bNumEndpoints); err = -EINVAL; goto err_out_clear; } /* * This loop doesn'd show control 0 endpoint, * so we will fill only 1-3 endpoints entry. 
*/ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; dev->ep[i+1] = endpoint->bEndpointAddress; #if 0 printk("%d: addr=%x, size=%d, dir=%s, type=%x\n", i, endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), (endpoint->bEndpointAddress & USB_DIR_IN)?"IN":"OUT", endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); #endif } err = ds_w1_init(dev); if (err) goto err_out_clear; mutex_lock(&ds_mutex); list_add_tail(&dev->ds_entry, &ds_devices); mutex_unlock(&ds_mutex); return 0; err_out_clear: usb_set_intfdata(intf, NULL); usb_put_dev(dev->udev); err_out_free: kfree(dev); return err; } static void ds_disconnect(struct usb_interface *intf) { struct ds_device *dev; dev = usb_get_intfdata(intf); if (!dev) return; mutex_lock(&ds_mutex); list_del(&dev->ds_entry); mutex_unlock(&ds_mutex); ds_w1_fini(dev); usb_set_intfdata(intf, NULL); usb_put_dev(dev->udev); kfree(dev); } static int ds_init(void) { int err; err = usb_register(&ds_driver); if (err) { printk(KERN_INFO "Failed to register DS9490R USB device: err=%d.\n", err); return err; } return 0; } static void ds_fini(void) { usb_deregister(&ds_driver); } module_init(ds_init); module_exit(ds_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>"); MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)");
gpl-2.0
Splitter/android_kernel_htc_msm7x30
sound/pci/sonicvibes.c
3589
52997
/* * Driver for S3 SonicVibes soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * BUGS: * It looks like 86c617 rev 3 doesn't supports DDMA buffers above 16MB? * Driver sometimes hangs... Nobody knows why at this moment... * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/info.h> #include <sound/control.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> #include <asm/io.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("S3 SonicVibes PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int reverb[SNDRV_CARDS]; static int mge[SNDRV_CARDS]; static unsigned int dmaio = 0x7a00; /* DDMA i/o address */ 
module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for S3 SonicVibes soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for S3 SonicVibes soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable S3 SonicVibes soundcard."); module_param_array(reverb, bool, NULL, 0444); MODULE_PARM_DESC(reverb, "Enable reverb (SRAM is present) for S3 SonicVibes soundcard."); module_param_array(mge, bool, NULL, 0444); MODULE_PARM_DESC(mge, "MIC Gain Enable for S3 SonicVibes soundcard."); module_param(dmaio, uint, 0444); MODULE_PARM_DESC(dmaio, "DDMA i/o base address for S3 SonicVibes soundcard."); /* * Enhanced port direct registers */ #define SV_REG(sonic, x) ((sonic)->enh_port + SV_REG_##x) #define SV_REG_CONTROL 0x00 /* R/W: CODEC/Mixer control register */ #define SV_ENHANCED 0x01 /* audio mode select - enhanced mode */ #define SV_TEST 0x02 /* test bit */ #define SV_REVERB 0x04 /* reverb enable */ #define SV_WAVETABLE 0x08 /* wavetable active / FM active if not set */ #define SV_INTA 0x20 /* INTA driving - should be always 1 */ #define SV_RESET 0x80 /* reset chip */ #define SV_REG_IRQMASK 0x01 /* R/W: CODEC/Mixer interrupt mask register */ #define SV_DMAA_MASK 0x01 /* mask DMA-A interrupt */ #define SV_DMAC_MASK 0x04 /* mask DMA-C interrupt */ #define SV_SPEC_MASK 0x08 /* special interrupt mask - should be always masked */ #define SV_UD_MASK 0x40 /* Up/Down button interrupt mask */ #define SV_MIDI_MASK 0x80 /* mask MIDI interrupt */ #define SV_REG_STATUS 0x02 /* R/O: CODEC/Mixer status register */ #define SV_DMAA_IRQ 0x01 /* DMA-A interrupt */ #define SV_DMAC_IRQ 0x04 /* DMA-C interrupt */ #define SV_SPEC_IRQ 0x08 /* special interrupt */ #define SV_UD_IRQ 0x40 /* Up/Down interrupt */ #define SV_MIDI_IRQ 0x80 /* MIDI interrupt */ #define SV_REG_INDEX 0x04 /* R/W: CODEC/Mixer index address register */ #define SV_MCE 0x40 /* mode change enable */ #define SV_TRD 0x80 /* DMA transfer 
request disabled */ #define SV_REG_DATA 0x05 /* R/W: CODEC/Mixer index data register */ /* * Enhanced port indirect registers */ #define SV_IREG_LEFT_ADC 0x00 /* Left ADC Input Control */ #define SV_IREG_RIGHT_ADC 0x01 /* Right ADC Input Control */ #define SV_IREG_LEFT_AUX1 0x02 /* Left AUX1 Input Control */ #define SV_IREG_RIGHT_AUX1 0x03 /* Right AUX1 Input Control */ #define SV_IREG_LEFT_CD 0x04 /* Left CD Input Control */ #define SV_IREG_RIGHT_CD 0x05 /* Right CD Input Control */ #define SV_IREG_LEFT_LINE 0x06 /* Left Line Input Control */ #define SV_IREG_RIGHT_LINE 0x07 /* Right Line Input Control */ #define SV_IREG_MIC 0x08 /* MIC Input Control */ #define SV_IREG_GAME_PORT 0x09 /* Game Port Control */ #define SV_IREG_LEFT_SYNTH 0x0a /* Left Synth Input Control */ #define SV_IREG_RIGHT_SYNTH 0x0b /* Right Synth Input Control */ #define SV_IREG_LEFT_AUX2 0x0c /* Left AUX2 Input Control */ #define SV_IREG_RIGHT_AUX2 0x0d /* Right AUX2 Input Control */ #define SV_IREG_LEFT_ANALOG 0x0e /* Left Analog Mixer Output Control */ #define SV_IREG_RIGHT_ANALOG 0x0f /* Right Analog Mixer Output Control */ #define SV_IREG_LEFT_PCM 0x10 /* Left PCM Input Control */ #define SV_IREG_RIGHT_PCM 0x11 /* Right PCM Input Control */ #define SV_IREG_DMA_DATA_FMT 0x12 /* DMA Data Format */ #define SV_IREG_PC_ENABLE 0x13 /* Playback/Capture Enable Register */ #define SV_IREG_UD_BUTTON 0x14 /* Up/Down Button Register */ #define SV_IREG_REVISION 0x15 /* Revision */ #define SV_IREG_ADC_OUTPUT_CTRL 0x16 /* ADC Output Control */ #define SV_IREG_DMA_A_UPPER 0x18 /* DMA A Upper Base Count */ #define SV_IREG_DMA_A_LOWER 0x19 /* DMA A Lower Base Count */ #define SV_IREG_DMA_C_UPPER 0x1c /* DMA C Upper Base Count */ #define SV_IREG_DMA_C_LOWER 0x1d /* DMA C Lower Base Count */ #define SV_IREG_PCM_RATE_LOW 0x1e /* PCM Sampling Rate Low Byte */ #define SV_IREG_PCM_RATE_HIGH 0x1f /* PCM Sampling Rate High Byte */ #define SV_IREG_SYNTH_RATE_LOW 0x20 /* Synthesizer Sampling Rate Low Byte */ #define 
SV_IREG_SYNTH_RATE_HIGH 0x21 /* Synthesizer Sampling Rate High Byte */ #define SV_IREG_ADC_CLOCK 0x22 /* ADC Clock Source Selection */ #define SV_IREG_ADC_ALT_RATE 0x23 /* ADC Alternative Sampling Rate Selection */ #define SV_IREG_ADC_PLL_M 0x24 /* ADC PLL M Register */ #define SV_IREG_ADC_PLL_N 0x25 /* ADC PLL N Register */ #define SV_IREG_SYNTH_PLL_M 0x26 /* Synthesizer PLL M Register */ #define SV_IREG_SYNTH_PLL_N 0x27 /* Synthesizer PLL N Register */ #define SV_IREG_MPU401 0x2a /* MPU-401 UART Operation */ #define SV_IREG_DRIVE_CTRL 0x2b /* Drive Control */ #define SV_IREG_SRS_SPACE 0x2c /* SRS Space Control */ #define SV_IREG_SRS_CENTER 0x2d /* SRS Center Control */ #define SV_IREG_WAVE_SOURCE 0x2e /* Wavetable Sample Source Select */ #define SV_IREG_ANALOG_POWER 0x30 /* Analog Power Down Control */ #define SV_IREG_DIGITAL_POWER 0x31 /* Digital Power Down Control */ #define SV_IREG_ADC_PLL SV_IREG_ADC_PLL_M #define SV_IREG_SYNTH_PLL SV_IREG_SYNTH_PLL_M /* * DMA registers */ #define SV_DMA_ADDR0 0x00 #define SV_DMA_ADDR1 0x01 #define SV_DMA_ADDR2 0x02 #define SV_DMA_ADDR3 0x03 #define SV_DMA_COUNT0 0x04 #define SV_DMA_COUNT1 0x05 #define SV_DMA_COUNT2 0x06 #define SV_DMA_MODE 0x0b #define SV_DMA_RESET 0x0d #define SV_DMA_MASK 0x0f /* * Record sources */ #define SV_RECSRC_RESERVED (0x00<<5) #define SV_RECSRC_CD (0x01<<5) #define SV_RECSRC_DAC (0x02<<5) #define SV_RECSRC_AUX2 (0x03<<5) #define SV_RECSRC_LINE (0x04<<5) #define SV_RECSRC_AUX1 (0x05<<5) #define SV_RECSRC_MIC (0x06<<5) #define SV_RECSRC_OUT (0x07<<5) /* * constants */ #define SV_FULLRATE 48000 #define SV_REFFREQUENCY 24576000 #define SV_ADCMULT 512 #define SV_MODE_PLAY 1 #define SV_MODE_CAPTURE 2 /* */ struct sonicvibes { unsigned long dma1size; unsigned long dma2size; int irq; unsigned long sb_port; unsigned long enh_port; unsigned long synth_port; unsigned long midi_port; unsigned long game_port; unsigned int dmaa_port; struct resource *res_dmaa; unsigned int dmac_port; struct resource *res_dmac; 
unsigned char enable; unsigned char irqmask; unsigned char revision; unsigned char format; unsigned char srs_space; unsigned char srs_center; unsigned char mpu_switch; unsigned char wave_source; unsigned int mode; struct pci_dev *pci; struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *playback_substream; struct snd_pcm_substream *capture_substream; struct snd_rawmidi *rmidi; struct snd_hwdep *fmsynth; /* S3FM */ spinlock_t reg_lock; unsigned int p_dma_size; unsigned int c_dma_size; struct snd_kcontrol *master_mute; struct snd_kcontrol *master_volume; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif }; static DEFINE_PCI_DEVICE_TABLE(snd_sonic_ids) = { { PCI_VDEVICE(S3, 0xca00), 0, }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_sonic_ids); static struct snd_ratden sonicvibes_adc_clock = { .num_min = 4000 * 65536, .num_max = 48000UL * 65536, .num_step = 1, .den = 65536, }; static struct snd_pcm_hw_constraint_ratdens snd_sonicvibes_hw_constraints_adc_clock = { .nrats = 1, .rats = &sonicvibes_adc_clock, }; /* * common I/O routines */ static inline void snd_sonicvibes_setdmaa(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { count--; outl(addr, sonic->dmaa_port + SV_DMA_ADDR0); outl(count, sonic->dmaa_port + SV_DMA_COUNT0); outb(0x18, sonic->dmaa_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmaa: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmaa_port + SV_DMA_ADDR0)); #endif } static inline void snd_sonicvibes_setdmac(struct sonicvibes * sonic, unsigned int addr, unsigned int count) { /* note: dmac is working in word mode!!! 
*/ count >>= 1; count--; outl(addr, sonic->dmac_port + SV_DMA_ADDR0); outl(count, sonic->dmac_port + SV_DMA_COUNT0); outb(0x14, sonic->dmac_port + SV_DMA_MODE); #if 0 printk(KERN_DEBUG "program dmac: addr = 0x%x, paddr = 0x%x\n", addr, inl(sonic->dmac_port + SV_DMA_ADDR0)); #endif } static inline unsigned int snd_sonicvibes_getdmaa(struct sonicvibes * sonic) { return (inl(sonic->dmaa_port + SV_DMA_COUNT0) & 0xffffff) + 1; } static inline unsigned int snd_sonicvibes_getdmac(struct sonicvibes * sonic) { /* note: dmac is working in word mode!!! */ return ((inl(sonic->dmac_port + SV_DMA_COUNT0) & 0xffffff) + 1) << 1; } static void snd_sonicvibes_out1(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); } static void snd_sonicvibes_out(struct sonicvibes * sonic, unsigned char reg, unsigned char value) { unsigned long flags; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); outb(value, SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static unsigned char snd_sonicvibes_in1(struct sonicvibes * sonic, unsigned char reg) { unsigned char value; outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); return value; } static unsigned char snd_sonicvibes_in(struct sonicvibes * sonic, unsigned char reg) { unsigned long flags; unsigned char value; spin_lock_irqsave(&sonic->reg_lock, flags); outb(reg, SV_REG(sonic, INDEX)); udelay(10); value = inb(SV_REG(sonic, DATA)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); return value; } #if 0 static void snd_sonicvibes_debug(struct sonicvibes * sonic) { printk(KERN_DEBUG "SV REGS: INDEX = 0x%02x ", inb(SV_REG(sonic, INDEX))); printk(" STATUS = 0x%02x\n", inb(SV_REG(sonic, STATUS))); printk(KERN_DEBUG " 0x00: left input = 0x%02x ", snd_sonicvibes_in(sonic, 0x00)); printk(" 0x20: synth rate low = 0x%02x\n", 
snd_sonicvibes_in(sonic, 0x20)); printk(KERN_DEBUG " 0x01: right input = 0x%02x ", snd_sonicvibes_in(sonic, 0x01)); printk(" 0x21: synth rate high = 0x%02x\n", snd_sonicvibes_in(sonic, 0x21)); printk(KERN_DEBUG " 0x02: left AUX1 = 0x%02x ", snd_sonicvibes_in(sonic, 0x02)); printk(" 0x22: ADC clock = 0x%02x\n", snd_sonicvibes_in(sonic, 0x22)); printk(KERN_DEBUG " 0x03: right AUX1 = 0x%02x ", snd_sonicvibes_in(sonic, 0x03)); printk(" 0x23: ADC alt rate = 0x%02x\n", snd_sonicvibes_in(sonic, 0x23)); printk(KERN_DEBUG " 0x04: left CD = 0x%02x ", snd_sonicvibes_in(sonic, 0x04)); printk(" 0x24: ADC pll M = 0x%02x\n", snd_sonicvibes_in(sonic, 0x24)); printk(KERN_DEBUG " 0x05: right CD = 0x%02x ", snd_sonicvibes_in(sonic, 0x05)); printk(" 0x25: ADC pll N = 0x%02x\n", snd_sonicvibes_in(sonic, 0x25)); printk(KERN_DEBUG " 0x06: left line = 0x%02x ", snd_sonicvibes_in(sonic, 0x06)); printk(" 0x26: Synth pll M = 0x%02x\n", snd_sonicvibes_in(sonic, 0x26)); printk(KERN_DEBUG " 0x07: right line = 0x%02x ", snd_sonicvibes_in(sonic, 0x07)); printk(" 0x27: Synth pll N = 0x%02x\n", snd_sonicvibes_in(sonic, 0x27)); printk(KERN_DEBUG " 0x08: MIC = 0x%02x ", snd_sonicvibes_in(sonic, 0x08)); printk(" 0x28: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x28)); printk(KERN_DEBUG " 0x09: Game port = 0x%02x ", snd_sonicvibes_in(sonic, 0x09)); printk(" 0x29: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x29)); printk(KERN_DEBUG " 0x0a: left synth = 0x%02x ", snd_sonicvibes_in(sonic, 0x0a)); printk(" 0x2a: MPU401 = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2a)); printk(KERN_DEBUG " 0x0b: right synth = 0x%02x ", snd_sonicvibes_in(sonic, 0x0b)); printk(" 0x2b: drive ctrl = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2b)); printk(KERN_DEBUG " 0x0c: left AUX2 = 0x%02x ", snd_sonicvibes_in(sonic, 0x0c)); printk(" 0x2c: SRS space = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2c)); printk(KERN_DEBUG " 0x0d: right AUX2 = 0x%02x ", snd_sonicvibes_in(sonic, 0x0d)); printk(" 0x2d: SRS center = 0x%02x\n", snd_sonicvibes_in(sonic, 
0x2d)); printk(KERN_DEBUG " 0x0e: left analog = 0x%02x ", snd_sonicvibes_in(sonic, 0x0e)); printk(" 0x2e: wave source = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2e)); printk(KERN_DEBUG " 0x0f: right analog = 0x%02x ", snd_sonicvibes_in(sonic, 0x0f)); printk(" 0x2f: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x2f)); printk(KERN_DEBUG " 0x10: left PCM = 0x%02x ", snd_sonicvibes_in(sonic, 0x10)); printk(" 0x30: analog power = 0x%02x\n", snd_sonicvibes_in(sonic, 0x30)); printk(KERN_DEBUG " 0x11: right PCM = 0x%02x ", snd_sonicvibes_in(sonic, 0x11)); printk(" 0x31: analog power = 0x%02x\n", snd_sonicvibes_in(sonic, 0x31)); printk(KERN_DEBUG " 0x12: DMA data format = 0x%02x ", snd_sonicvibes_in(sonic, 0x12)); printk(" 0x32: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x32)); printk(KERN_DEBUG " 0x13: P/C enable = 0x%02x ", snd_sonicvibes_in(sonic, 0x13)); printk(" 0x33: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x33)); printk(KERN_DEBUG " 0x14: U/D button = 0x%02x ", snd_sonicvibes_in(sonic, 0x14)); printk(" 0x34: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x34)); printk(KERN_DEBUG " 0x15: revision = 0x%02x ", snd_sonicvibes_in(sonic, 0x15)); printk(" 0x35: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x35)); printk(KERN_DEBUG " 0x16: ADC output ctrl = 0x%02x ", snd_sonicvibes_in(sonic, 0x16)); printk(" 0x36: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x36)); printk(KERN_DEBUG " 0x17: --- = 0x%02x ", snd_sonicvibes_in(sonic, 0x17)); printk(" 0x37: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x37)); printk(KERN_DEBUG " 0x18: DMA A upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x18)); printk(" 0x38: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x38)); printk(KERN_DEBUG " 0x19: DMA A lower cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x19)); printk(" 0x39: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x39)); printk(KERN_DEBUG " 0x1a: --- = 0x%02x ", snd_sonicvibes_in(sonic, 0x1a)); printk(" 0x3a: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3a)); printk(KERN_DEBUG " 0x1b: --- = 0x%02x ", 
snd_sonicvibes_in(sonic, 0x1b)); printk(" 0x3b: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3b)); printk(KERN_DEBUG " 0x1c: DMA C upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x1c)); printk(" 0x3c: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3c)); printk(KERN_DEBUG " 0x1d: DMA C upper cnt = 0x%02x ", snd_sonicvibes_in(sonic, 0x1d)); printk(" 0x3d: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3d)); printk(KERN_DEBUG " 0x1e: PCM rate low = 0x%02x ", snd_sonicvibes_in(sonic, 0x1e)); printk(" 0x3e: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3e)); printk(KERN_DEBUG " 0x1f: PCM rate high = 0x%02x ", snd_sonicvibes_in(sonic, 0x1f)); printk(" 0x3f: --- = 0x%02x\n", snd_sonicvibes_in(sonic, 0x3f)); } #endif static void snd_sonicvibes_setfmt(struct sonicvibes * sonic, unsigned char mask, unsigned char value) { unsigned long flags; spin_lock_irqsave(&sonic->reg_lock, flags); outb(SV_MCE | SV_IREG_DMA_DATA_FMT, SV_REG(sonic, INDEX)); if (mask) { sonic->format = inb(SV_REG(sonic, DATA)); udelay(10); } sonic->format = (sonic->format & mask) | value; outb(sonic->format, SV_REG(sonic, DATA)); udelay(10); outb(0, SV_REG(sonic, INDEX)); udelay(10); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static void snd_sonicvibes_pll(unsigned int rate, unsigned int *res_r, unsigned int *res_m, unsigned int *res_n) { unsigned int r, m = 0, n = 0; unsigned int xm, xn, xr, xd, metric = ~0U; if (rate < 625000 / SV_ADCMULT) rate = 625000 / SV_ADCMULT; if (rate > 150000000 / SV_ADCMULT) rate = 150000000 / SV_ADCMULT; /* slight violation of specs, needed for continuous sampling rates */ for (r = 0; rate < 75000000 / SV_ADCMULT; r += 0x20, rate <<= 1); for (xn = 3; xn < 33; xn++) /* 35 */ for (xm = 3; xm < 257; xm++) { xr = ((SV_REFFREQUENCY / SV_ADCMULT) * xm) / xn; if (xr >= rate) xd = xr - rate; else xd = rate - xr; if (xd < metric) { metric = xd; m = xm - 2; n = xn - 2; } } *res_r = r; *res_m = m; *res_n = n; #if 0 printk(KERN_DEBUG "metric = %i, xm = %i, xn = %i\n", metric, xm, xn); 
printk(KERN_DEBUG "pll: m = 0x%x, r = 0x%x, n = 0x%x\n", reg, m, r, n); #endif } static void snd_sonicvibes_setpll(struct sonicvibes * sonic, unsigned char reg, unsigned int rate) { unsigned long flags; unsigned int r, m, n; snd_sonicvibes_pll(rate, &r, &m, &n); if (sonic != NULL) { spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, reg, m); snd_sonicvibes_out1(sonic, reg + 1, r | n); spin_unlock_irqrestore(&sonic->reg_lock, flags); } } static void snd_sonicvibes_set_adc_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned long flags; unsigned int div; unsigned char clock; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { /* use the alternate clock */ clock = 0x10; } else { /* use the PLL source */ clock = 0x00; snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, rate); } spin_lock_irqsave(&sonic->reg_lock, flags); snd_sonicvibes_out1(sonic, SV_IREG_ADC_ALT_RATE, (div - 1) << 4); snd_sonicvibes_out1(sonic, SV_IREG_ADC_CLOCK, clock); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static int snd_sonicvibes_hw_constraint_dac_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { unsigned int rate, div, r, m, n; if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min == hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max) { rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min; div = 48000 / rate; if (div > 8) div = 8; if ((48000 / div) == rate) { params->rate_num = rate; params->rate_den = 1; } else { snd_sonicvibes_pll(rate, &r, &m, &n); snd_BUG_ON(SV_REFFREQUENCY % 16); snd_BUG_ON(SV_ADCMULT % 512); params->rate_num = (SV_REFFREQUENCY/16) * (n+2) * r; params->rate_den = (SV_ADCMULT/512) * (m+2); } } return 0; } static void snd_sonicvibes_set_dac_rate(struct sonicvibes * sonic, unsigned int rate) { unsigned int div; unsigned long flags; div = (rate * 65536 + SV_FULLRATE / 2) / SV_FULLRATE; if (div > 65535) div = 65535; spin_lock_irqsave(&sonic->reg_lock, flags); 
snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_HIGH, div >> 8); snd_sonicvibes_out1(sonic, SV_IREG_PCM_RATE_LOW, div); spin_unlock_irqrestore(&sonic->reg_lock, flags); } static int snd_sonicvibes_trigger(struct sonicvibes * sonic, int what, int cmd) { int result = 0; spin_lock(&sonic->reg_lock); if (cmd == SNDRV_PCM_TRIGGER_START) { if (!(sonic->enable & what)) { sonic->enable |= what; snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable); } } else if (cmd == SNDRV_PCM_TRIGGER_STOP) { if (sonic->enable & what) { sonic->enable &= ~what; snd_sonicvibes_out1(sonic, SV_IREG_PC_ENABLE, sonic->enable); } } else { result = -EINVAL; } spin_unlock(&sonic->reg_lock); return result; } static irqreturn_t snd_sonicvibes_interrupt(int irq, void *dev_id) { struct sonicvibes *sonic = dev_id; unsigned char status; status = inb(SV_REG(sonic, STATUS)); if (!(status & (SV_DMAA_IRQ | SV_DMAC_IRQ | SV_MIDI_IRQ))) return IRQ_NONE; if (status == 0xff) { /* failure */ outb(sonic->irqmask = ~0, SV_REG(sonic, IRQMASK)); snd_printk(KERN_ERR "IRQ failure - interrupts disabled!!\n"); return IRQ_HANDLED; } if (sonic->pcm) { if (status & SV_DMAA_IRQ) snd_pcm_period_elapsed(sonic->playback_substream); if (status & SV_DMAC_IRQ) snd_pcm_period_elapsed(sonic->capture_substream); } if (sonic->rmidi) { if (status & SV_MIDI_IRQ) snd_mpu401_uart_interrupt(irq, sonic->rmidi->private_data); } if (status & SV_UD_IRQ) { unsigned char udreg; int vol, oleft, oright, mleft, mright; spin_lock(&sonic->reg_lock); udreg = snd_sonicvibes_in1(sonic, SV_IREG_UD_BUTTON); vol = udreg & 0x3f; if (!(udreg & 0x40)) vol = -vol; oleft = mleft = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ANALOG); oright = mright = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ANALOG); oleft &= 0x1f; oright &= 0x1f; oleft += vol; if (oleft < 0) oleft = 0; if (oleft > 0x1f) oleft = 0x1f; oright += vol; if (oright < 0) oright = 0; if (oright > 0x1f) oright = 0x1f; if (udreg & 0x80) { mleft ^= 0x80; mright ^= 0x80; } oleft |= mleft & 0x80; oright |= 
mright & 0x80; snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ANALOG, oleft); snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ANALOG, oright); spin_unlock(&sonic->reg_lock); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_mute->id); snd_ctl_notify(sonic->card, SNDRV_CTL_EVENT_MASK_VALUE, &sonic->master_volume->id); } return IRQ_HANDLED; } /* * PCM part */ static int snd_sonicvibes_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 1, cmd); } static int snd_sonicvibes_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); return snd_sonicvibes_trigger(sonic, 2, cmd); } static int snd_sonicvibes_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_sonicvibes_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } static int snd_sonicvibes_playback_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->p_dma_size = size; count--; if (runtime->channels > 1) fmt |= 1; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 2; snd_sonicvibes_setfmt(sonic, ~3, fmt); snd_sonicvibes_set_dac_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmaa(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_A_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static int snd_sonicvibes_capture_prepare(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = 
snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char fmt = 0; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); sonic->c_dma_size = size; count >>= 1; count--; if (runtime->channels > 1) fmt |= 0x10; if (snd_pcm_format_width(runtime->format) == 16) fmt |= 0x20; snd_sonicvibes_setfmt(sonic, ~0x30, fmt); snd_sonicvibes_set_adc_rate(sonic, runtime->rate); spin_lock_irq(&sonic->reg_lock); snd_sonicvibes_setdmac(sonic, runtime->dma_addr, size); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_UPPER, count >> 8); snd_sonicvibes_out1(sonic, SV_IREG_DMA_C_LOWER, count); spin_unlock_irq(&sonic->reg_lock); return 0; } static snd_pcm_uframes_t snd_sonicvibes_playback_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 1)) return 0; ptr = sonic->p_dma_size - snd_sonicvibes_getdmaa(sonic); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_sonicvibes_capture_pointer(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); size_t ptr; if (!(sonic->enable & 2)) return 0; ptr = sonic->c_dma_size - snd_sonicvibes_getdmac(sonic); return bytes_to_frames(substream->runtime, ptr); } static struct snd_pcm_hardware snd_sonicvibes_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_sonicvibes_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 32, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static int snd_sonicvibes_playback_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_PLAY; sonic->playback_substream = substream; runtime->hw = snd_sonicvibes_playback; snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, snd_sonicvibes_hw_constraint_dac_rate, NULL, SNDRV_PCM_HW_PARAM_RATE, -1); return 0; } static int snd_sonicvibes_capture_open(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; sonic->mode |= SV_MODE_CAPTURE; sonic->capture_substream = substream; runtime->hw = snd_sonicvibes_capture; snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &snd_sonicvibes_hw_constraints_adc_clock); return 0; } static int snd_sonicvibes_playback_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->playback_substream = NULL; sonic->mode &= ~SV_MODE_PLAY; return 0; } static int snd_sonicvibes_capture_close(struct snd_pcm_substream *substream) { struct sonicvibes *sonic = snd_pcm_substream_chip(substream); sonic->capture_substream = NULL; sonic->mode &= ~SV_MODE_CAPTURE; return 0; } static struct snd_pcm_ops snd_sonicvibes_playback_ops = { .open = snd_sonicvibes_playback_open, .close = snd_sonicvibes_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_playback_prepare, 
.trigger = snd_sonicvibes_playback_trigger, .pointer = snd_sonicvibes_playback_pointer, }; static struct snd_pcm_ops snd_sonicvibes_capture_ops = { .open = snd_sonicvibes_capture_open, .close = snd_sonicvibes_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sonicvibes_hw_params, .hw_free = snd_sonicvibes_hw_free, .prepare = snd_sonicvibes_capture_prepare, .trigger = snd_sonicvibes_capture_trigger, .pointer = snd_sonicvibes_capture_pointer, }; static int __devinit snd_sonicvibes_pcm(struct sonicvibes * sonic, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(sonic->card, "s3_86c617", device, 1, 1, &pcm)) < 0) return err; if (snd_BUG_ON(!pcm)) return -EINVAL; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sonicvibes_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sonicvibes_capture_ops); pcm->private_data = sonic; pcm->info_flags = 0; strcpy(pcm->name, "S3 SonicVibes"); sonic->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(sonic->pci), 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } /* * Mixer part */ #define SONICVIBES_MUX(xname, xindex) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_mux, \ .get = snd_sonicvibes_get_mux, .put = snd_sonicvibes_put_mux } static int snd_sonicvibes_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[7] = { "CD", "PCM", "Aux1", "Line", "Aux0", "Mic", "Mix" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 2; uinfo->value.enumerated.items = 7; if (uinfo->value.enumerated.item >= 7) uinfo->value.enumerated.item = 6; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_sonicvibes_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); spin_lock_irq(&sonic->reg_lock); 
ucontrol->value.enumerated.item[0] = ((snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC) & SV_RECSRC_OUT) >> 5) - 1; ucontrol->value.enumerated.item[1] = ((snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC) & SV_RECSRC_OUT) >> 5) - 1; spin_unlock_irq(&sonic->reg_lock); return 0; } static int snd_sonicvibes_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); unsigned short left, right, oval1, oval2; int change; if (ucontrol->value.enumerated.item[0] >= 7 || ucontrol->value.enumerated.item[1] >= 7) return -EINVAL; left = (ucontrol->value.enumerated.item[0] + 1) << 5; right = (ucontrol->value.enumerated.item[1] + 1) << 5; spin_lock_irq(&sonic->reg_lock); oval1 = snd_sonicvibes_in1(sonic, SV_IREG_LEFT_ADC); oval2 = snd_sonicvibes_in1(sonic, SV_IREG_RIGHT_ADC); left = (oval1 & ~SV_RECSRC_OUT) | left; right = (oval2 & ~SV_RECSRC_OUT) | right; change = left != oval1 || right != oval2; snd_sonicvibes_out1(sonic, SV_IREG_LEFT_ADC, left); snd_sonicvibes_out1(sonic, SV_IREG_RIGHT_ADC, right); spin_unlock_irq(&sonic->reg_lock); return change; } #define SONICVIBES_SINGLE(xname, xindex, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_single, \ .get = snd_sonicvibes_get_single, .put = snd_sonicvibes_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_sonicvibes_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, reg)>> shift) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_sonicvibes_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int change; unsigned short val, oval; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; val <<= shift; spin_lock_irq(&sonic->reg_lock); oval = snd_sonicvibes_in1(sonic, reg); val = (oval & ~(mask << shift)) | val; change = val != oval; snd_sonicvibes_out1(sonic, reg, val); spin_unlock_irq(&sonic->reg_lock); return change; } #define SONICVIBES_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_sonicvibes_info_double, \ .get = snd_sonicvibes_get_double, .put = snd_sonicvibes_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) } static int snd_sonicvibes_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = 
(kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_sonicvibes_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; spin_lock_irq(&sonic->reg_lock); ucontrol->value.integer.value[0] = (snd_sonicvibes_in1(sonic, left_reg) >> shift_left) & mask; ucontrol->value.integer.value[1] = (snd_sonicvibes_in1(sonic, right_reg) >> shift_right) & mask; spin_unlock_irq(&sonic->reg_lock); if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_sonicvibes_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned short val1, val2, oval1, oval2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; spin_lock_irq(&sonic->reg_lock); oval1 = snd_sonicvibes_in1(sonic, left_reg); oval2 = snd_sonicvibes_in1(sonic, right_reg); val1 = 
(oval1 & ~(mask << shift_left)) | val1; val2 = (oval2 & ~(mask << shift_right)) | val2; change = val1 != oval1 || val2 != oval2; snd_sonicvibes_out1(sonic, left_reg, val1); snd_sonicvibes_out1(sonic, right_reg, val2); spin_unlock_irq(&sonic->reg_lock); return change; } static struct snd_kcontrol_new snd_sonicvibes_controls[] __devinitdata = { SONICVIBES_DOUBLE("Capture Volume", 0, SV_IREG_LEFT_ADC, SV_IREG_RIGHT_ADC, 0, 0, 15, 0), SONICVIBES_DOUBLE("Aux Playback Switch", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 0, SV_IREG_LEFT_AUX1, SV_IREG_RIGHT_AUX1, 0, 0, 31, 1), SONICVIBES_DOUBLE("CD Playback Switch", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 7, 7, 1, 1), SONICVIBES_DOUBLE("CD Playback Volume", 0, SV_IREG_LEFT_CD, SV_IREG_RIGHT_CD, 0, 0, 31, 1), SONICVIBES_DOUBLE("Line Playback Switch", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 7, 7, 1, 1), SONICVIBES_DOUBLE("Line Playback Volume", 0, SV_IREG_LEFT_LINE, SV_IREG_RIGHT_LINE, 0, 0, 31, 1), SONICVIBES_SINGLE("Mic Playback Switch", 0, SV_IREG_MIC, 7, 1, 1), SONICVIBES_SINGLE("Mic Playback Volume", 0, SV_IREG_MIC, 0, 15, 1), SONICVIBES_SINGLE("Mic Boost", 0, SV_IREG_LEFT_ADC, 4, 1, 0), SONICVIBES_DOUBLE("Synth Playback Switch", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 7, 7, 1, 1), SONICVIBES_DOUBLE("Synth Playback Volume", 0, SV_IREG_LEFT_SYNTH, SV_IREG_RIGHT_SYNTH, 0, 0, 31, 1), SONICVIBES_DOUBLE("Aux Playback Switch", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 7, 7, 1, 1), SONICVIBES_DOUBLE("Aux Playback Volume", 1, SV_IREG_LEFT_AUX2, SV_IREG_RIGHT_AUX2, 0, 0, 31, 1), SONICVIBES_DOUBLE("Master Playback Switch", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 7, 7, 1, 1), SONICVIBES_DOUBLE("Master Playback Volume", 0, SV_IREG_LEFT_ANALOG, SV_IREG_RIGHT_ANALOG, 0, 0, 31, 1), SONICVIBES_DOUBLE("PCM Playback Switch", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 7, 7, 1, 1), SONICVIBES_DOUBLE("PCM Playback Volume", 0, SV_IREG_LEFT_PCM, SV_IREG_RIGHT_PCM, 0, 0, 63, 1), 
SONICVIBES_SINGLE("Loopback Capture Switch", 0, SV_IREG_ADC_OUTPUT_CTRL, 0, 1, 0), SONICVIBES_SINGLE("Loopback Capture Volume", 0, SV_IREG_ADC_OUTPUT_CTRL, 2, 63, 1), SONICVIBES_MUX("Capture Source", 0) }; static void snd_sonicvibes_master_free(struct snd_kcontrol *kcontrol) { struct sonicvibes *sonic = snd_kcontrol_chip(kcontrol); sonic->master_mute = NULL; sonic->master_volume = NULL; } static int __devinit snd_sonicvibes_mixer(struct sonicvibes * sonic) { struct snd_card *card; struct snd_kcontrol *kctl; unsigned int idx; int err; if (snd_BUG_ON(!sonic || !sonic->card)) return -EINVAL; card = sonic->card; strcpy(card->mixername, "S3 SonicVibes"); for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_controls); idx++) { if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_sonicvibes_controls[idx], sonic))) < 0) return err; switch (idx) { case 0: case 1: kctl->private_free = snd_sonicvibes_master_free; break; } } return 0; } /* */ static void snd_sonicvibes_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct sonicvibes *sonic = entry->private_data; unsigned char tmp; tmp = sonic->srs_space & 0x0f; snd_iprintf(buffer, "SRS 3D : %s\n", sonic->srs_space & 0x80 ? "off" : "on"); snd_iprintf(buffer, "SRS Space : %s\n", tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" : tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%"); tmp = sonic->srs_center & 0x0f; snd_iprintf(buffer, "SRS Center : %s\n", tmp == 0x00 ? "100%" : tmp == 0x01 ? "75%" : tmp == 0x02 ? "50%" : tmp == 0x03 ? "25%" : "0%"); tmp = sonic->wave_source & 0x03; snd_iprintf(buffer, "WaveTable Source : %s\n", tmp == 0x00 ? "on-board ROM" : tmp == 0x01 ? "PCI bus" : "on-board ROM + PCI bus"); tmp = sonic->mpu_switch; snd_iprintf(buffer, "Onboard synth : %s\n", tmp & 0x01 ? "on" : "off"); snd_iprintf(buffer, "Ext. Rx to synth : %s\n", tmp & 0x02 ? "on" : "off"); snd_iprintf(buffer, "MIDI to ext. Tx : %s\n", tmp & 0x04 ? 
"on" : "off"); } static void __devinit snd_sonicvibes_proc_init(struct sonicvibes * sonic) { struct snd_info_entry *entry; if (! snd_card_proc_new(sonic->card, "sonicvibes", &entry)) snd_info_set_text_ops(entry, sonic, snd_sonicvibes_proc_read); } /* */ #ifdef SUPPORT_JOYSTICK static struct snd_kcontrol_new snd_sonicvibes_game_control __devinitdata = SONICVIBES_SINGLE("Joystick Speed", 0, SV_IREG_GAME_PORT, 1, 15, 0); static int __devinit snd_sonicvibes_create_gameport(struct sonicvibes *sonic) { struct gameport *gp; sonic->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "sonicvibes: cannot allocate memory for gameport\n"); return -ENOMEM; } gameport_set_name(gp, "SonicVibes Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(sonic->pci)); gameport_set_dev_parent(gp, &sonic->pci->dev); gp->io = sonic->game_port; gameport_register_port(gp); snd_ctl_add(sonic->card, snd_ctl_new1(&snd_sonicvibes_game_control, sonic)); return 0; } static void snd_sonicvibes_free_gameport(struct sonicvibes *sonic) { if (sonic->gameport) { gameport_unregister_port(sonic->gameport); sonic->gameport = NULL; } } #else static inline int snd_sonicvibes_create_gameport(struct sonicvibes *sonic) { return -ENOSYS; } static inline void snd_sonicvibes_free_gameport(struct sonicvibes *sonic) { } #endif static int snd_sonicvibes_free(struct sonicvibes *sonic) { snd_sonicvibes_free_gameport(sonic); pci_write_config_dword(sonic->pci, 0x40, sonic->dmaa_port); pci_write_config_dword(sonic->pci, 0x48, sonic->dmac_port); if (sonic->irq >= 0) free_irq(sonic->irq, sonic); release_and_free_resource(sonic->res_dmaa); release_and_free_resource(sonic->res_dmac); pci_release_regions(sonic->pci); pci_disable_device(sonic->pci); kfree(sonic); return 0; } static int snd_sonicvibes_dev_free(struct snd_device *device) { struct sonicvibes *sonic = device->device_data; return snd_sonicvibes_free(sonic); } static int __devinit snd_sonicvibes_create(struct snd_card *card, struct pci_dev 
*pci, int reverb, int mge, struct sonicvibes ** rsonic) { struct sonicvibes *sonic; unsigned int dmaa, dmac; int err; static struct snd_device_ops ops = { .dev_free = snd_sonicvibes_dev_free, }; *rsonic = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 24 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(24)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(24)) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } sonic = kzalloc(sizeof(*sonic), GFP_KERNEL); if (sonic == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&sonic->reg_lock); sonic->card = card; sonic->pci = pci; sonic->irq = -1; if ((err = pci_request_regions(pci, "S3 SonicVibes")) < 0) { kfree(sonic); pci_disable_device(pci); return err; } sonic->sb_port = pci_resource_start(pci, 0); sonic->enh_port = pci_resource_start(pci, 1); sonic->synth_port = pci_resource_start(pci, 2); sonic->midi_port = pci_resource_start(pci, 3); sonic->game_port = pci_resource_start(pci, 4); if (request_irq(pci->irq, snd_sonicvibes_interrupt, IRQF_SHARED, "S3 SonicVibes", sonic)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_sonicvibes_free(sonic); return -EBUSY; } sonic->irq = pci->irq; pci_read_config_dword(pci, 0x40, &dmaa); pci_read_config_dword(pci, 0x48, &dmac); dmaio &= ~0x0f; dmaa &= ~0x0f; dmac &= ~0x0f; if (!dmaa) { dmaa = dmaio; dmaio += 0x10; snd_printk(KERN_INFO "BIOS did not allocate DDMA channel A i/o, allocated at 0x%x\n", dmaa); } if (!dmac) { dmac = dmaio; dmaio += 0x10; snd_printk(KERN_INFO "BIOS did not allocate DDMA channel C i/o, allocated at 0x%x\n", dmac); } pci_write_config_dword(pci, 0x40, dmaa); pci_write_config_dword(pci, 0x48, dmac); if ((sonic->res_dmaa = request_region(dmaa, 0x10, "S3 SonicVibes DDMA-A")) == NULL) { snd_sonicvibes_free(sonic); snd_printk(KERN_ERR "unable to grab DDMA-A port at 
0x%x-0x%x\n", dmaa, dmaa + 0x10 - 1); return -EBUSY; } if ((sonic->res_dmac = request_region(dmac, 0x10, "S3 SonicVibes DDMA-C")) == NULL) { snd_sonicvibes_free(sonic); snd_printk(KERN_ERR "unable to grab DDMA-C port at 0x%x-0x%x\n", dmac, dmac + 0x10 - 1); return -EBUSY; } pci_read_config_dword(pci, 0x40, &sonic->dmaa_port); pci_read_config_dword(pci, 0x48, &sonic->dmac_port); sonic->dmaa_port &= ~0x0f; sonic->dmac_port &= ~0x0f; pci_write_config_dword(pci, 0x40, sonic->dmaa_port | 9); /* enable + enhanced */ pci_write_config_dword(pci, 0x48, sonic->dmac_port | 9); /* enable */ /* ok.. initialize S3 SonicVibes chip */ outb(SV_RESET, SV_REG(sonic, CONTROL)); /* reset chip */ udelay(100); outb(0, SV_REG(sonic, CONTROL)); /* release reset */ udelay(100); outb(SV_ENHANCED | SV_INTA | (reverb ? SV_REVERB : 0), SV_REG(sonic, CONTROL)); inb(SV_REG(sonic, STATUS)); /* clear IRQs */ #if 1 snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0); /* drive current 16mA */ #else snd_sonicvibes_out(sonic, SV_IREG_DRIVE_CTRL, 0x40); /* drive current 8mA */ #endif snd_sonicvibes_out(sonic, SV_IREG_PC_ENABLE, sonic->enable = 0); /* disable playback & capture */ outb(sonic->irqmask = ~(SV_DMAA_MASK | SV_DMAC_MASK | SV_UD_MASK), SV_REG(sonic, IRQMASK)); inb(SV_REG(sonic, STATUS)); /* clear IRQs */ snd_sonicvibes_out(sonic, SV_IREG_ADC_CLOCK, 0); /* use PLL as clock source */ snd_sonicvibes_out(sonic, SV_IREG_ANALOG_POWER, 0); /* power up analog parts */ snd_sonicvibes_out(sonic, SV_IREG_DIGITAL_POWER, 0); /* power up digital parts */ snd_sonicvibes_setpll(sonic, SV_IREG_ADC_PLL, 8000); snd_sonicvibes_out(sonic, SV_IREG_SRS_SPACE, sonic->srs_space = 0x80); /* SRS space off */ snd_sonicvibes_out(sonic, SV_IREG_SRS_CENTER, sonic->srs_center = 0x00);/* SRS center off */ snd_sonicvibes_out(sonic, SV_IREG_MPU401, sonic->mpu_switch = 0x05); /* MPU-401 switch */ snd_sonicvibes_out(sonic, SV_IREG_WAVE_SOURCE, sonic->wave_source = 0x00); /* onboard ROM */ snd_sonicvibes_out(sonic, 
SV_IREG_PCM_RATE_LOW, (8000 * 65536 / SV_FULLRATE) & 0xff); snd_sonicvibes_out(sonic, SV_IREG_PCM_RATE_HIGH, ((8000 * 65536 / SV_FULLRATE) >> 8) & 0xff); snd_sonicvibes_out(sonic, SV_IREG_LEFT_ADC, mge ? 0xd0 : 0xc0); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ADC, 0xc0); snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX1, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX1, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_CD, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_CD, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_LINE, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_LINE, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_MIC, 0x8f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_SYNTH, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_SYNTH, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_AUX2, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_AUX2, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_ANALOG, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_ANALOG, 0x9f); snd_sonicvibes_out(sonic, SV_IREG_LEFT_PCM, 0xbf); snd_sonicvibes_out(sonic, SV_IREG_RIGHT_PCM, 0xbf); snd_sonicvibes_out(sonic, SV_IREG_ADC_OUTPUT_CTRL, 0xfc); #if 0 snd_sonicvibes_debug(sonic); #endif sonic->revision = snd_sonicvibes_in(sonic, SV_IREG_REVISION); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, sonic, &ops)) < 0) { snd_sonicvibes_free(sonic); return err; } snd_sonicvibes_proc_init(sonic); snd_card_set_dev(card, &pci->dev); *rsonic = sonic; return 0; } /* * MIDI section */ static struct snd_kcontrol_new snd_sonicvibes_midi_controls[] __devinitdata = { SONICVIBES_SINGLE("SonicVibes Wave Source RAM", 0, SV_IREG_WAVE_SOURCE, 0, 1, 0), SONICVIBES_SINGLE("SonicVibes Wave Source RAM+ROM", 0, SV_IREG_WAVE_SOURCE, 1, 1, 0), SONICVIBES_SINGLE("SonicVibes Onboard Synth", 0, SV_IREG_MPU401, 0, 1, 0), SONICVIBES_SINGLE("SonicVibes External Rx to Synth", 0, SV_IREG_MPU401, 1, 1, 0), SONICVIBES_SINGLE("SonicVibes External Tx", 0, SV_IREG_MPU401, 2, 1, 0) }; static int snd_sonicvibes_midi_input_open(struct snd_mpu401 * mpu) { 
struct sonicvibes *sonic = mpu->private_data; outb(sonic->irqmask &= ~SV_MIDI_MASK, SV_REG(sonic, IRQMASK)); return 0; } static void snd_sonicvibes_midi_input_close(struct snd_mpu401 * mpu) { struct sonicvibes *sonic = mpu->private_data; outb(sonic->irqmask |= SV_MIDI_MASK, SV_REG(sonic, IRQMASK)); } static int __devinit snd_sonicvibes_midi(struct sonicvibes * sonic, struct snd_rawmidi *rmidi) { struct snd_mpu401 * mpu = rmidi->private_data; struct snd_card *card = sonic->card; struct snd_rawmidi_str *dir; unsigned int idx; int err; mpu->private_data = sonic; mpu->open_input = snd_sonicvibes_midi_input_open; mpu->close_input = snd_sonicvibes_midi_input_close; dir = &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT]; for (idx = 0; idx < ARRAY_SIZE(snd_sonicvibes_midi_controls); idx++) if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_sonicvibes_midi_controls[idx], sonic))) < 0) return err; return 0; } static int __devinit snd_sonic_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct sonicvibes *sonic; struct snd_rawmidi *midi_uart; struct snd_opl3 *opl3; int idx, err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; for (idx = 0; idx < 5; idx++) { if (pci_resource_start(pci, idx) == 0 || !(pci_resource_flags(pci, idx) & IORESOURCE_IO)) { snd_card_free(card); return -ENODEV; } } if ((err = snd_sonicvibes_create(card, pci, reverb[dev] ? 1 : 0, mge[dev] ? 
1 : 0, &sonic)) < 0) { snd_card_free(card); return err; } strcpy(card->driver, "SonicVibes"); strcpy(card->shortname, "S3 SonicVibes"); sprintf(card->longname, "%s rev %i at 0x%llx, irq %i", card->shortname, sonic->revision, (unsigned long long)pci_resource_start(pci, 1), sonic->irq); if ((err = snd_sonicvibes_pcm(sonic, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_sonicvibes_mixer(sonic)) < 0) { snd_card_free(card); return err; } if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SONICVIBES, sonic->midi_port, MPU401_INFO_INTEGRATED, sonic->irq, 0, &midi_uart)) < 0) { snd_card_free(card); return err; } snd_sonicvibes_midi(sonic, midi_uart); if ((err = snd_opl3_create(card, sonic->synth_port, sonic->synth_port + 2, OPL3_HW_OPL3_SV, 1, &opl3)) < 0) { snd_card_free(card); return err; } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); return err; } snd_sonicvibes_create_gameport(sonic); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_sonic_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "S3 SonicVibes", .id_table = snd_sonic_ids, .probe = snd_sonic_probe, .remove = __devexit_p(snd_sonic_remove), }; static int __init alsa_card_sonicvibes_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_sonicvibes_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_sonicvibes_init) module_exit(alsa_card_sonicvibes_exit)
gpl-2.0
Android4Lumia/kernel_nokia_msm8x27
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
4869
29403
/* * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.c * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * Kamil Debski, <k.debski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/version.h> #include <linux/videodev2.h> #include <linux/workqueue.h> #include <media/v4l2-ctrls.h> #include <media/videobuf2-core.h> #include "regs-mfc.h" #include "s5p_mfc_common.h" #include "s5p_mfc_debug.h" #include "s5p_mfc_dec.h" #include "s5p_mfc_intr.h" #include "s5p_mfc_opr.h" #include "s5p_mfc_pm.h" #include "s5p_mfc_shm.h" static struct s5p_mfc_fmt formats[] = { { .name = "4:2:0 2 Planes 64x32 Tiles", .fourcc = V4L2_PIX_FMT_NV12MT, .codec_mode = S5P_FIMV_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, }, { .name = "4:2:0 2 Planes", .fourcc = V4L2_PIX_FMT_NV12M, .codec_mode = S5P_FIMV_CODEC_NONE, .type = MFC_FMT_RAW, .num_planes = 2, }, { .name = "H264 Encoded Stream", .fourcc = V4L2_PIX_FMT_H264, .codec_mode = S5P_FIMV_CODEC_H264_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "H263 Encoded Stream", .fourcc = V4L2_PIX_FMT_H263, .codec_mode = S5P_FIMV_CODEC_H263_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "MPEG1 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG1, .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "MPEG2 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG2, .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "MPEG4 Encoded Stream", .fourcc = V4L2_PIX_FMT_MPEG4, .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, 
{ .name = "XviD Encoded Stream", .fourcc = V4L2_PIX_FMT_XVID, .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "VC1 Encoded Stream", .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G, .codec_mode = S5P_FIMV_CODEC_VC1_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, { .name = "VC1 RCV Encoded Stream", .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L, .codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC, .type = MFC_FMT_DEC, .num_planes = 1, }, }; #define NUM_FORMATS ARRAY_SIZE(formats) /* Find selected format description */ static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t) { unsigned int i; for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].fourcc == f->fmt.pix_mp.pixelformat && formats[i].type == t) return &formats[i]; } return NULL; } static struct mfc_control controls[] = { { .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "H264 Display Delay", .minimum = 0, .maximum = 16383, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "H264 Display Delay Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mpeg4 Loop Filter Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Slice Interface Enable", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, { .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Minimum number of cap bufs", .minimum = 1, .maximum = 32, .step = 1, .default_value = 1, .is_volatile = 1, }, }; #define NUM_CTRLS ARRAY_SIZE(controls) /* Check whether a context should be run on hardware */ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) { /* Context is to parse header */ if (ctx->src_queue_cnt >= 1 && ctx->state == 
MFCINST_GOT_INST) return 1; /* Context is to decode a frame */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_RUNNING && ctx->dst_queue_cnt >= ctx->dpb_count) return 1; /* Context is to return last frame */ if (ctx->state == MFCINST_FINISHING && ctx->dst_queue_cnt >= ctx->dpb_count) return 1; /* Context is to set buffers */ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_HEAD_PARSED && ctx->capture_state == QUEUE_BUFS_MMAPED) return 1; /* Resolution change */ if ((ctx->state == MFCINST_RES_CHANGE_INIT || ctx->state == MFCINST_RES_CHANGE_FLUSH) && ctx->dst_queue_cnt >= ctx->dpb_count) return 1; if (ctx->state == MFCINST_RES_CHANGE_END && ctx->src_queue_cnt >= 1) return 1; mfc_debug(2, "ctx is not ready\n"); return 0; } static struct s5p_mfc_codec_ops decoder_codec_ops = { .pre_seq_start = NULL, .post_seq_start = NULL, .pre_frame_start = NULL, .post_frame_start = NULL, }; /* Query capabilities of the device */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct s5p_mfc_dev *dev = video_drvdata(file); strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1); strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->version = KERNEL_VERSION(1, 0, 0); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; return 0; } /* Enumerate format */ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out) { struct s5p_mfc_fmt *fmt; int i, j = 0; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (mplane && formats[i].num_planes == 1) continue; else if (!mplane && formats[i].num_planes > 1) continue; if (out && formats[i].type != MFC_FMT_DEC) continue; else if (!out && formats[i].type != MFC_FMT_RAW) continue; if (j == f->index) break; ++j; } if (i == ARRAY_SIZE(formats)) return -EINVAL; fmt = &formats[i]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; return 0; } static int 
vidioc_enum_fmt_vid_cap(struct file *file, void *pirv, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(f, false, false); } static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *pirv, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(f, true, false); } static int vidioc_enum_fmt_vid_out(struct file *file, void *prov, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(f, false, true); } static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *prov, struct v4l2_fmtdesc *f) { return vidioc_enum_fmt(f, true, true); } /* Get format */ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); struct v4l2_pix_format_mplane *pix_mp; mfc_debug_enter(); pix_mp = &f->fmt.pix_mp; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && (ctx->state == MFCINST_GOT_INST || ctx->state == MFCINST_RES_CHANGE_END)) { /* If the MFC is parsing the header, * so wait until it is finished */ s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0); } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { /* This is run on CAPTURE (decode output) */ /* Width and height are set to the dimensions of the movie, the buffer is bigger and further processing stages should crop to this rectangle. 
*/ pix_mp->width = ctx->buf_width; pix_mp->height = ctx->buf_height; pix_mp->field = V4L2_FIELD_NONE; pix_mp->num_planes = 2; /* Set pixelformat to the format in which MFC outputs the decoded frame */ pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size; } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* This is run on OUTPUT The buffer contains compressed image so width and height have no meaning */ pix_mp->width = 0; pix_mp->height = 0; pix_mp->field = V4L2_FIELD_NONE; pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size; pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size; pix_mp->pixelformat = ctx->src_fmt->fourcc; pix_mp->num_planes = ctx->src_fmt->num_planes; } else { mfc_err("Format could not be read\n"); mfc_debug(2, "%s-- with error\n", __func__); return -EINVAL; } mfc_debug_leave(); return 0; } /* Try format */ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_fmt *fmt; if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { mfc_err("This node supports decoding only\n"); return -EINVAL; } fmt = find_format(f, MFC_FMT_DEC); if (!fmt) { mfc_err("Unsupported format\n"); return -EINVAL; } if (fmt->type != MFC_FMT_DEC) { mfc_err("\n"); return -EINVAL; } return 0; } /* Set format */ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = 0; struct s5p_mfc_fmt *fmt; struct v4l2_pix_format_mplane *pix_mp; mfc_debug_enter(); ret = vidioc_try_fmt(file, priv, f); pix_mp = &f->fmt.pix_mp; if (ret) return ret; if (ctx->vq_src.streaming || ctx->vq_dst.streaming) { v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__); ret = -EBUSY; goto out; } fmt = find_format(f, MFC_FMT_DEC); if (!fmt || 
fmt->codec_mode == S5P_FIMV_CODEC_NONE) { mfc_err("Unknown codec\n"); ret = -EINVAL; goto out; } if (fmt->type != MFC_FMT_DEC) { mfc_err("Wrong format selected, you should choose " "format for decoding\n"); ret = -EINVAL; goto out; } ctx->src_fmt = fmt; ctx->codec_mode = fmt->codec_mode; mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); pix_mp->height = 0; pix_mp->width = 0; if (pix_mp->plane_fmt[0].sizeimage) ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; else pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = DEF_CPB_SIZE; pix_mp->plane_fmt[0].bytesperline = 0; ctx->state = MFCINST_INIT; out: mfc_debug_leave(); return ret; } /* Reqeust buffers */ static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret = 0; unsigned long flags; if (reqbufs->memory != V4L2_MEMORY_MMAP) { mfc_err("Only V4L2_MEMORY_MAP is supported\n"); return -EINVAL; } if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* Can only request buffers after an instance has been opened.*/ if (ctx->state == MFCINST_INIT) { ctx->src_bufs_cnt = 0; if (reqbufs->count == 0) { mfc_debug(2, "Freeing buffers\n"); s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_src, reqbufs); s5p_mfc_clock_off(); return ret; } /* Decoding */ if (ctx->output_state != QUEUE_FREE) { mfc_err("Bufs have already been requested\n"); return -EINVAL; } s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_src, reqbufs); s5p_mfc_clock_off(); if (ret) { mfc_err("vb2_reqbufs on output failed\n"); return ret; } mfc_debug(2, "vb2_reqbufs: %d\n", ret); ctx->output_state = QUEUE_BUFS_REQUESTED; } } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { ctx->dst_bufs_cnt = 0; if (reqbufs->count == 0) { mfc_debug(2, "Freeing buffers\n"); s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); s5p_mfc_clock_off(); return ret; } if (ctx->capture_state != QUEUE_FREE) { 
mfc_err("Bufs have already been requested\n"); return -EINVAL; } ctx->capture_state = QUEUE_BUFS_REQUESTED; s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); s5p_mfc_clock_off(); if (ret) { mfc_err("vb2_reqbufs on capture failed\n"); return ret; } if (reqbufs->count < ctx->dpb_count) { mfc_err("Not enough buffers allocated\n"); reqbufs->count = 0; s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); s5p_mfc_clock_off(); return -ENOMEM; } ctx->total_dpb_count = reqbufs->count; ret = s5p_mfc_alloc_codec_buffers(ctx); if (ret) { mfc_err("Failed to allocate decoding buffers\n"); reqbufs->count = 0; s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); s5p_mfc_clock_off(); return -ENOMEM; } if (ctx->dst_bufs_cnt == ctx->total_dpb_count) { ctx->capture_state = QUEUE_BUFS_MMAPED; } else { mfc_err("Not all buffers passed to buf_init\n"); reqbufs->count = 0; s5p_mfc_clock_on(); ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); s5p_mfc_release_codec_buffers(ctx); s5p_mfc_clock_off(); return -ENOMEM; } if (s5p_mfc_ctx_ready(ctx)) { spin_lock_irqsave(&dev->condlock, flags); set_bit(ctx->num, &dev->ctx_work_bits); spin_unlock_irqrestore(&dev->condlock, flags); } s5p_mfc_try_run(dev); s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0); } return ret; } /* Query buffer */ static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); int ret; int i; if (buf->memory != V4L2_MEMORY_MMAP) { mfc_err("Only mmaped buffers can be used\n"); return -EINVAL; } mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type); if (ctx->state == MFCINST_INIT && buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { ret = vb2_querybuf(&ctx->vq_src, buf); } else if (ctx->state == MFCINST_RUNNING && buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { ret = vb2_querybuf(&ctx->vq_dst, buf); for (i = 0; i < buf->length; i++) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; } else { 
mfc_err("vidioc_querybuf called in an inappropriate state\n"); ret = -EINVAL; } mfc_debug_leave(); return ret; } /* Queue a buffer */ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (ctx->state == MFCINST_ERROR) { mfc_err("Call on QBUF after unrecoverable error\n"); return -EIO; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_qbuf(&ctx->vq_src, buf); else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_qbuf(&ctx->vq_dst, buf); return -EINVAL; } /* Dequeue a buffer */ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (ctx->state == MFCINST_ERROR) { mfc_err("Call on DQBUF after unrecoverable error\n"); return -EIO; } if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK); else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK); return -EINVAL; } /* Stream on */ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; int ret = -EINVAL; mfc_debug_enter(); if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (ctx->state == MFCINST_INIT) { ctx->dst_bufs_cnt = 0; ctx->src_bufs_cnt = 0; ctx->capture_state = QUEUE_FREE; ctx->output_state = QUEUE_FREE; s5p_mfc_alloc_instance_buffer(ctx); s5p_mfc_alloc_dec_temp_buffers(ctx); spin_lock_irqsave(&dev->condlock, flags); set_bit(ctx->num, &dev->ctx_work_bits); spin_unlock_irqrestore(&dev->condlock, flags); s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_try_run(dev); if (s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) { /* Error or timeout */ mfc_err("Error getting instance from hardware\n"); s5p_mfc_release_instance_buffer(ctx); s5p_mfc_release_dec_desc_buffer(ctx); return 
-EIO; } mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); } ret = vb2_streamon(&ctx->vq_src, type); } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ret = vb2_streamon(&ctx->vq_dst, type); mfc_debug_leave(); return ret; } /* Stream off, which equals to a pause */ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return vb2_streamoff(&ctx->vq_src, type); else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return vb2_streamoff(&ctx->vq_dst, type); return -EINVAL; } /* Set controls - v4l2 control framework */ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); switch (ctrl->id) { case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY: ctx->loop_filter_mpeg4 = ctrl->val; break; case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE: ctx->display_delay_enable = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: ctx->display_delay = ctrl->val; break; case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: ctx->slice_interface = ctrl->val; break; default: mfc_err("Invalid control 0x%08x\n", ctrl->id); return -EINVAL; } return 0; } static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl) { struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); struct s5p_mfc_dev *dev = ctx->dev; switch (ctrl->id) { case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { ctrl->val = ctx->dpb_count; break; } else if (ctx->state != MFCINST_INIT) { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return -EINVAL; } /* Should wait for the header to be parsed */ s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0); if (ctx->state >= MFCINST_HEAD_PARSED && ctx->state < MFCINST_ABORT) { ctrl->val = ctx->dpb_count; } else { v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n"); return 
-EINVAL; } break; } return 0; } static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = { .s_ctrl = s5p_mfc_dec_s_ctrl, .g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl, }; /* Get cropping information */ static int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *cr) { struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); u32 left, right, top, bottom; if (ctx->state != MFCINST_HEAD_PARSED && ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING && ctx->state != MFCINST_FINISHED) { mfc_err("Cannont set crop\n"); return -EINVAL; } if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) { left = s5p_mfc_read_shm(ctx, CROP_INFO_H); right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT; left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK; top = s5p_mfc_read_shm(ctx, CROP_INFO_V); bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT; top = top & S5P_FIMV_SHARED_CROP_TOP_MASK; cr->c.left = left; cr->c.top = top; cr->c.width = ctx->img_width - left - right; cr->c.height = ctx->img_height - top - bottom; mfc_debug(2, "Cropping info [h264]: l=%d t=%d " "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top, cr->c.width, cr->c.height, right, bottom, ctx->buf_width, ctx->buf_height); } else { cr->c.left = 0; cr->c.top = 0; cr->c.width = ctx->img_width; cr->c.height = ctx->img_height; mfc_debug(2, "Cropping info: w=%d h=%d fw=%d " "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width, ctx->buf_height); } return 0; } /* v4l2_ioctl_ops */ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane, .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt, .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt, .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt, 
.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_crop = vidioc_g_crop, }; static int s5p_mfc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *buf_count, unsigned int *plane_count, unsigned int psize[], void *allocators[]) { struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); /* Video output for decoding (source) * this can be set after getting an instance */ if (ctx->state == MFCINST_INIT && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { /* A single plane is required for input */ *plane_count = 1; if (*buf_count < 1) *buf_count = 1; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; /* Video capture for decoding (destination) * this can be set after the header was parsed */ } else if (ctx->state == MFCINST_HEAD_PARSED && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { /* Output plane count is 2 - one for Y and one for CbCr */ *plane_count = 2; /* Setup buffer count */ if (*buf_count < ctx->dpb_count) *buf_count = ctx->dpb_count; if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB) *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB; if (*buf_count > MFC_MAX_BUFFERS) *buf_count = MFC_MAX_BUFFERS; } else { mfc_err("State seems invalid. 
State = %d, vq->type = %d\n", ctx->state, vq->type); return -EINVAL; } mfc_debug(2, "Buffer count=%d, plane count=%d\n", *buf_count, *plane_count); if (ctx->state == MFCINST_HEAD_PARSED && vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { psize[0] = ctx->luma_size; psize[1] = ctx->chroma_size; allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && ctx->state == MFCINST_INIT) { psize[0] = ctx->dec_src_buf_size; allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; } else { mfc_err("This video node is dedicated to decoding. Decoding not initalised\n"); return -EINVAL; } return 0; } static void s5p_mfc_unlock(struct vb2_queue *q) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; mutex_unlock(&dev->mfc_mutex); } static void s5p_mfc_lock(struct vb2_queue *q) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; mutex_lock(&dev->mfc_mutex); } static int s5p_mfc_buf_init(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); unsigned int i; if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { if (ctx->capture_state == QUEUE_BUFS_MMAPED) return 0; for (i = 0; i <= ctx->src_fmt->num_planes ; i++) { if (IS_ERR_OR_NULL(ERR_PTR( vb2_dma_contig_plane_dma_addr(vb, i)))) { mfc_err("Plane mem not allocated\n"); return -EINVAL; } } if (vb2_plane_size(vb, 0) < ctx->luma_size || vb2_plane_size(vb, 1) < ctx->chroma_size) { mfc_err("Plane buffer (CAPTURE) is too small\n"); return -EINVAL; } i = vb->v4l2_buf.index; ctx->dst_bufs[i].b = vb; ctx->dst_bufs[i].cookie.raw.luma = vb2_dma_contig_plane_dma_addr(vb, 0); ctx->dst_bufs[i].cookie.raw.chroma = vb2_dma_contig_plane_dma_addr(vb, 1); ctx->dst_bufs_cnt++; } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { if (IS_ERR_OR_NULL(ERR_PTR( vb2_dma_contig_plane_dma_addr(vb, 0)))) { 
mfc_err("Plane memory not allocated\n"); return -EINVAL; } if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) { mfc_err("Plane buffer (OUTPUT) is too small\n"); return -EINVAL; } i = vb->v4l2_buf.index; ctx->src_bufs[i].b = vb; ctx->src_bufs[i].cookie.stream = vb2_dma_contig_plane_dma_addr(vb, 0); ctx->src_bufs_cnt++; } else { mfc_err("s5p_mfc_buf_init: unknown queue type\n"); return -EINVAL; } return 0; } static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) { struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; v4l2_ctrl_handler_setup(&ctx->ctrl_handler); if (ctx->state == MFCINST_FINISHING || ctx->state == MFCINST_FINISHED) ctx->state = MFCINST_RUNNING; /* If context is ready then dev = work->data;schedule it to run */ if (s5p_mfc_ctx_ready(ctx)) { spin_lock_irqsave(&dev->condlock, flags); set_bit(ctx->num, &dev->ctx_work_bits); spin_unlock_irqrestore(&dev->condlock, flags); } s5p_mfc_try_run(dev); return 0; } static int s5p_mfc_stop_streaming(struct vb2_queue *q) { unsigned long flags; struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; int aborted = 0; if ((ctx->state == MFCINST_FINISHING || ctx->state == MFCINST_RUNNING) && dev->curr_ctx == ctx->num && dev->hw_lock) { ctx->state = MFCINST_ABORT; s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0); aborted = 1; } spin_lock_irqsave(&dev->irqlock, flags); if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); INIT_LIST_HEAD(&ctx->dst_queue); ctx->dst_queue_cnt = 0; ctx->dpb_flush_flag = 1; ctx->dec_dst_flag = 0; } if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); INIT_LIST_HEAD(&ctx->src_queue); ctx->src_queue_cnt = 0; } if (aborted) ctx->state = MFCINST_RUNNING; spin_unlock_irqrestore(&dev->irqlock, flags); return 0; } static void s5p_mfc_buf_queue(struct vb2_buffer *vb) { struct 
vb2_queue *vq = vb->vb2_queue; struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); struct s5p_mfc_dev *dev = ctx->dev; unsigned long flags; struct s5p_mfc_buf *mfc_buf; if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; mfc_buf->used = 0; spin_lock_irqsave(&dev->irqlock, flags); list_add_tail(&mfc_buf->list, &ctx->src_queue); ctx->src_queue_cnt++; spin_unlock_irqrestore(&dev->irqlock, flags); } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; mfc_buf->used = 0; /* Mark destination as available for use by MFC */ spin_lock_irqsave(&dev->irqlock, flags); set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag); list_add_tail(&mfc_buf->list, &ctx->dst_queue); ctx->dst_queue_cnt++; spin_unlock_irqrestore(&dev->irqlock, flags); } else { mfc_err("Unsupported buffer type (%d)\n", vq->type); } if (s5p_mfc_ctx_ready(ctx)) { spin_lock_irqsave(&dev->condlock, flags); set_bit(ctx->num, &dev->ctx_work_bits); spin_unlock_irqrestore(&dev->condlock, flags); } s5p_mfc_try_run(dev); } static struct vb2_ops s5p_mfc_dec_qops = { .queue_setup = s5p_mfc_queue_setup, .wait_prepare = s5p_mfc_unlock, .wait_finish = s5p_mfc_lock, .buf_init = s5p_mfc_buf_init, .start_streaming = s5p_mfc_start_streaming, .stop_streaming = s5p_mfc_stop_streaming, .buf_queue = s5p_mfc_buf_queue, }; struct s5p_mfc_codec_ops *get_dec_codec_ops(void) { return &decoder_codec_ops; } struct vb2_ops *get_dec_queue_ops(void) { return &s5p_mfc_dec_qops; } const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void) { return &s5p_mfc_dec_ioctl_ops; } #define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \ && V4L2_CTRL_DRIVER_PRIV(x)) int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx) { struct v4l2_ctrl_config cfg; int i; v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS); if (ctx->ctrl_handler.error) { mfc_err("v4l2_ctrl_handler_init failed\n"); return ctx->ctrl_handler.error; } for (i = 0; i < NUM_CTRLS; 
i++) { if (IS_MFC51_PRIV(controls[i].id)) { cfg.ops = &s5p_mfc_dec_ctrl_ops; cfg.id = controls[i].id; cfg.min = controls[i].minimum; cfg.max = controls[i].maximum; cfg.def = controls[i].default_value; cfg.name = controls[i].name; cfg.type = controls[i].type; cfg.step = controls[i].step; cfg.menu_skip_mask = 0; ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler, &cfg, NULL); } else { ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_mfc_dec_ctrl_ops, controls[i].id, controls[i].minimum, controls[i].maximum, controls[i].step, controls[i].default_value); } if (ctx->ctrl_handler.error) { mfc_err("Adding control (%d) failed\n", i); return ctx->ctrl_handler.error; } if (controls[i].is_volatile && ctx->ctrls[i]) ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; } return 0; } void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx) { int i; v4l2_ctrl_handler_free(&ctx->ctrl_handler); for (i = 0; i < NUM_CTRLS; i++) ctx->ctrls[i] = NULL; }
gpl-2.0
Loller79/Solid_Kernel-GEEHRC-LP
drivers/net/wireless/brcm80211/brcmutil/utils.c
4869
6055
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/netdevice.h> #include <linux/module.h> #include <brcmu_utils.h> MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); struct sk_buff *brcmu_pkt_buf_get_skb(uint len) { struct sk_buff *skb; skb = dev_alloc_skb(len); if (skb) { skb_put(skb, len); skb->priority = 0; } return skb; } EXPORT_SYMBOL(brcmu_pkt_buf_get_skb); /* Free the driver packet. 
Free the tag if present */ void brcmu_pkt_buf_free_skb(struct sk_buff *skb) { WARN_ON(skb->next); if (skb->destructor) /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if * destructor exists */ dev_kfree_skb_any(skb); else /* can free immediately (even in_irq()) if destructor * does not exist */ dev_kfree_skb(skb); } EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence */ struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p) { struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec].skblist; skb_queue_tail(q, p); pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } EXPORT_SYMBOL(brcmu_pktq_penq); struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec, struct sk_buff *p) { struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec].skblist; skb_queue_head(q, p); pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } EXPORT_SYMBOL(brcmu_pktq_penq_head); struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec) { struct sk_buff_head *q; struct sk_buff *p; q = &pq->q[prec].skblist; p = skb_dequeue(q); if (p == NULL) return NULL; pq->len--; return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq); struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) { struct sk_buff_head *q; struct sk_buff *p; q = &pq->q[prec].skblist; p = skb_dequeue_tail(q); if (p == NULL) return NULL; pq->len--; return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq_tail); void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg) { struct sk_buff_head *q; struct sk_buff *p, *next; q = &pq->q[prec].skblist; skb_queue_walk_safe(q, p, next) { if (fn == NULL || (*fn) (p, arg)) { skb_unlink(p, q); brcmu_pkt_buf_free_skb(p); pq->len--; } } } EXPORT_SYMBOL(brcmu_pktq_pflush); void brcmu_pktq_flush(struct pktq *pq, bool dir, bool 
(*fn)(struct sk_buff *, void *), void *arg) { int prec; for (prec = 0; prec < pq->num_prec; prec++) brcmu_pktq_pflush(pq, prec, dir, fn, arg); } EXPORT_SYMBOL(brcmu_pktq_flush); void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; /* pq is variable size; only zero out what's requested */ memset(pq, 0, offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); pq->num_prec = (u16) num_prec; pq->max = (u16) max_len; for (prec = 0; prec < num_prec; prec++) { pq->q[prec].max = pq->max; skb_queue_head_init(&pq->q[prec].skblist); } } EXPORT_SYMBOL(brcmu_pktq_init); struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (!skb_queue_empty(&pq->q[prec].skblist)) break; if (prec_out) *prec_out = prec; return skb_peek_tail(&pq->q[prec].skblist); } EXPORT_SYMBOL(brcmu_pktq_peek_tail); /* Return sum of lengths of a specific set of precedences */ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].skblist.qlen; return len; } EXPORT_SYMBOL(brcmu_pktq_mlen); /* Priority dequeue from a specific set of precedences */ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct sk_buff_head *q; struct sk_buff *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && skb_queue_empty(&pq->q[prec].skblist)) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || skb_queue_empty(&pq->q[prec].skblist)) if (prec-- == 0) return NULL; q = &pq->q[prec].skblist; p = skb_dequeue(q); if (p == NULL) return NULL; pq->len--; if (prec_out) *prec_out = prec; return p; } EXPORT_SYMBOL(brcmu_pktq_mdeq); #if defined(DEBUG) /* pretty hex print a pkt buffer chain */ void brcmu_prpkt(const char *msg, struct sk_buff *p0) { struct sk_buff *p; if (msg && (msg[0] != '\0')) pr_debug("%s:\n", msg); for (p = p0; p; p = 
p->next) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len); } EXPORT_SYMBOL(brcmu_prpkt); void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_debug("%pV", &vaf); va_end(args); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size); } EXPORT_SYMBOL(brcmu_dbg_hex_dump); #endif /* defined(DEBUG) */
gpl-2.0
cheatman-xda/kyleopen-kernel
drivers/char/ppdev.c
7941
19633
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. 
* * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. 
*/ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) { if (!bytes_written) { bytes_written = -EINTR; } break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. 
*/ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. */ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user 
(&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; memset(&par_timeout, 0, sizeof(par_timeout)); par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, 
.unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } if (parport_register_driver(&pp_driver)) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
gpl-2.0
Cl3Kener/UBER-L
drivers/video/aty/radeon_backlight.c
11013
6262
/*
 * Backlight code for ATI Radeon based graphic cards
 *
 * Copyright (c) 2000 Ani Joshi <ajoshi@kernel.crashing.org>
 * Copyright (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 * Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "radeonfb.h"
#include <linux/backlight.h>
#include <linux/slab.h>

#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif

/* Maximum hardware backlight level programmed into LVDS_BL_MOD_LEVEL. */
#define MAX_RADEON_LEVEL 0xFF

/* Per-backlight-device private data, attached via bl_get_data(). */
struct radeon_bl_privdata {
	struct radeonfb_info *rinfo;
	/* Non-zero on panels whose backlight polarity is inverted, i.e.
	 * a smaller register value means a brighter display. */
	uint8_t negative;
};

/*
 * Translate a backlight level index into the hardware register value:
 * look it up in the fbdev backlight curve, scale it, clamp it to
 * [0, MAX_RADEON_LEVEL], and invert it for negative-polarity panels.
 */
static int radeon_bl_get_level_brightness(struct radeon_bl_privdata *pdata, int level)
{
	int rlevel;

	/* Get and convert the value */
	/* No locking of bl_curve since we read a single value */
	rlevel = pdata->rinfo->info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL;

	if (rlevel < 0)
		rlevel = 0;
	else if (rlevel > MAX_RADEON_LEVEL)
		rlevel = MAX_RADEON_LEVEL;

	if (pdata->negative)
		rlevel = MAX_RADEON_LEVEL - rlevel;

	return rlevel;
}

/*
 * backlight_ops.update_status: apply the current brightness/power state
 * to the LVDS controller.  The power-up and power-down register
 * sequences below are order-sensitive; completion of a power
 * transition is deferred to rinfo->lvds_timer after the panel's
 * pwr_delay.  Only meaningful when monitor 1 is an LCD.
 */
static int radeon_bl_update_status(struct backlight_device *bd)
{
	struct radeon_bl_privdata *pdata = bl_get_data(bd);
	struct radeonfb_info *rinfo = pdata->rinfo;
	u32 lvds_gen_cntl, tmpPixclksCntl;
	int level;

	if (rinfo->mon1_type != MT_LCD)
		return 0;

	/* We turn off the LCD completely instead of just dimming the
	 * backlight. This provides some greater power saving and the display
	 * is useless without backlight anyway.
	 */
	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK)
		level = 0;
	else
		level = bd->props.brightness;

	/* Cancel any in-flight deferred power transition before we
	 * reprogram the LVDS state. */
	del_timer_sync(&rinfo->lvds_timer);
	radeon_engine_idle();

	lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
	if (level > 0) {
		lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
		if (!(lvds_gen_cntl & LVDS_BLON) || !(lvds_gen_cntl & LVDS_ON)) {
			/* Panel currently off: start the power-up sequence,
			 * finishing via lvds_timer after pwr_delay ms. */
			lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_DIGON);
			lvds_gen_cntl |= LVDS_BLON | LVDS_EN;
			OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
			lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
			lvds_gen_cntl |= (radeon_bl_get_level_brightness(pdata, level) <<
					  LVDS_BL_MOD_LEVEL_SHIFT);
			lvds_gen_cntl |= LVDS_ON;
			lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_BL_MOD_EN);
			rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
			mod_timer(&rinfo->lvds_timer,
				  jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
		} else {
			/* Panel already on: just reprogram the level. */
			lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
			lvds_gen_cntl |= (radeon_bl_get_level_brightness(pdata, level) <<
					  LVDS_BL_MOD_LEVEL_SHIFT);
			OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
		}
		rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
		rinfo->init_state.lvds_gen_cntl |= rinfo->pending_lvds_gen_cntl
			& LVDS_STATE_MASK;
	} else {
		/* Asic bug, when turning off LVDS_ON, we have to make sure
		   RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off */
		tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
		if (rinfo->is_mobility || rinfo->is_IGP)
			OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
		lvds_gen_cntl &= ~(LVDS_BL_MOD_LEVEL_MASK | LVDS_BL_MOD_EN);
		lvds_gen_cntl |= (radeon_bl_get_level_brightness(pdata, 0) <<
				  LVDS_BL_MOD_LEVEL_SHIFT);
		lvds_gen_cntl |= LVDS_DISPLAY_DIS;
		OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
		udelay(100);
		lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN);
		OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
		/* DIGON is dropped later by the deferred timer, not here. */
		lvds_gen_cntl &= ~(LVDS_DIGON);
		rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
		mod_timer(&rinfo->lvds_timer,
			  jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
		if (rinfo->is_mobility || rinfo->is_IGP)
			OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
	}
	rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
	rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);

	return 0;
}

/* backlight_ops.get_brightness: report the cached brightness value. */
static int radeon_bl_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops radeon_bl_data = {
	.get_brightness = radeon_bl_get_brightness,
	.update_status	= radeon_bl_update_status,
};

/*
 * Register a backlight device for this adapter's LCD and program an
 * initial (maximum) brightness.  Silently does nothing when monitor 1
 * is not an LCD, or on PowerMacs whose backlight is not ATI-driven.
 * On any failure the partially allocated state is freed and
 * rinfo->info->bl_dev is left NULL.
 */
void radeonfb_bl_init(struct radeonfb_info *rinfo)
{
	struct backlight_properties props;
	struct backlight_device *bd;
	struct radeon_bl_privdata *pdata;
	char name[12];

	if (rinfo->mon1_type != MT_LCD)
		return;

#ifdef CONFIG_PMAC_BACKLIGHT
	if (!pmac_has_backlight_type("ati") &&
	    !pmac_has_backlight_type("mnca"))
		return;
#endif

	pdata = kmalloc(sizeof(struct radeon_bl_privdata), GFP_KERNEL);
	if (!pdata) {
		printk("radeonfb: Memory allocation failed\n");
		goto error;
	}

	snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
	bd = backlight_device_register(name, rinfo->info->dev, pdata,
				       &radeon_bl_data, &props);
	if (IS_ERR(bd)) {
		rinfo->info->bl_dev = NULL;
		printk("radeonfb: Backlight registration failed\n");
		goto error;
	}

	pdata->rinfo = rinfo;

	/* Pardon me for that hack... maybe some day we can figure out in what
	 * direction backlight should work on a given panel?
	 */
	pdata->negative = (rinfo->family != CHIP_FAMILY_RV200 &&
			   rinfo->family != CHIP_FAMILY_RV250 &&
			   rinfo->family != CHIP_FAMILY_RV280 &&
			   rinfo->family != CHIP_FAMILY_RV350);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Some PowerBook panels are known to need inverted polarity
	 * regardless of chip family. */
	pdata->negative = pdata->negative ||
		of_machine_is_compatible("PowerBook4,3") ||
		of_machine_is_compatible("PowerBook6,3") ||
		of_machine_is_compatible("PowerBook6,5");
#endif

	rinfo->info->bl_dev = bd;
	fb_bl_default_curve(rinfo->info, 0,
		63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL,
		217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);

	bd->props.brightness = bd->props.max_brightness;
	bd->props.power = FB_BLANK_UNBLANK;
	backlight_update_status(bd);

	printk("radeonfb: Backlight initialized (%s)\n", name);

	return;

error:
	/* kfree(NULL) is a no-op, so this is safe on the !pdata path. */
	kfree(pdata);
	return;
}

/* Unregister the backlight device (if any) and free its private data. */
void radeonfb_bl_exit(struct radeonfb_info *rinfo)
{
	struct backlight_device *bd = rinfo->info->bl_dev;

	if (bd) {
		struct radeon_bl_privdata *pdata;
		pdata = bl_get_data(bd);
		backlight_device_unregister(bd);
		kfree(pdata);
		rinfo->info->bl_dev = NULL;
		printk("radeonfb: Backlight unloaded\n");
	}
}
gpl-2.0
videoP/jaPRO
codeJK2/game/wp_emplaced_gun.cpp
6
1805
/* This file is part of OpenJK. OpenJK is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. OpenJK is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenJK. If not, see <http://www.gnu.org/licenses/>. */ // Copyright 2013 OpenJK #include "g_headers.h" #include "b_local.h" #include "g_local.h" #include "wp_saber.h" #include "w_local.h" #include "g_functions.h" // Emplaced Gun //--------------------------------------------------------- void WP_EmplacedFire( gentity_t *ent ) //--------------------------------------------------------- { float damage = weaponData[WP_EMPLACED_GUN].damage * ( ent->NPC ? 0.1f : 1.0f ); float vel = EMPLACED_VEL * ( ent->NPC ? 0.4f : 1.0f ); gentity_t *missile = CreateMissile( wpMuzzle, wpFwd, vel, 10000, ent ); missile->classname = "emplaced_proj"; missile->s.weapon = WP_EMPLACED_GUN; missile->damage = damage; missile->dflags = DAMAGE_DEATH_KNOCKBACK | DAMAGE_HEAVY_WEAP_CLASS; missile->methodOfDeath = MOD_EMPLACED; missile->clipmask = MASK_SHOT | CONTENTS_LIGHTSABER; // do some weird switchery on who the real owner is, we do this so the projectiles don't hit the gun object missile->owner = ent->owner; VectorSet( missile->maxs, EMPLACED_SIZE, EMPLACED_SIZE, EMPLACED_SIZE ); VectorScale( missile->maxs, -1, missile->mins ); // alternate wpMuzzles ent->fxID = !ent->fxID; }
gpl-2.0
Gangstere44/ASCYLIB_Test
src/bst-seq_external/test_simple.c
6
17468
/* * File: test_simple.c * Author: Vasileios Trigonakis <vasileios.trigonakis@epfl.ch> * Description: * test_simple.c is part of ASCYLIB * * Copyright (c) 2014 Vasileios Trigonakis <vasileios.trigonakis@epfl.ch>, * Tudor David <tudor.david@epfl.ch> * Distributed Programming Lab (LPD), EPFL * * ASCYLIB is free software: you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2 * of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <assert.h> #include <getopt.h> #include <limits.h> #include <pthread.h> #include <signal.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <time.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <sched.h> #include <inttypes.h> #include <sys/time.h> #include <unistd.h> #include <malloc.h> #include "utils.h" #include "atomic_ops.h" #include "rapl_read.h" #ifdef __sparc__ # include <sys/types.h> # include <sys/processor.h> # include <sys/procset.h> #endif #include "intset.h" /* ################################################################### * * Definition of macros: per data structure * ################################################################### */ #define DS_CONTAINS(s,k,t) set_contains(s, k) #define DS_ADD(s,k,t) set_add(s, k, k) #define DS_REMOVE(s,k,t) set_remove(s, k) #define DS_SIZE(s) set_size(s) #define DS_NEW() set_new() #define DS_TYPE intset_t #define DS_NODE node_t /* ################################################################### * * GLOBALS * ################################################################### */ size_t initial = DEFAULT_INITIAL; size_t range = DEFAULT_RANGE; size_t load_factor; size_t update = DEFAULT_UPDATE; size_t num_threads 
= DEFAULT_NB_THREADS; size_t duration = DEFAULT_DURATION; int test_verbose = 0; size_t print_vals_num = 100; size_t pf_vals_num = 1023; size_t put, put_explicit = false; double update_rate, put_rate, get_rate; size_t size_after = 0; int seed = 0; __thread unsigned long * seeds; uint32_t rand_max; #define rand_min 1 static volatile int stop; TEST_VARS_GLOBAL; volatile ticks *putting_succ; volatile ticks *putting_fail; volatile ticks *getting_succ; volatile ticks *getting_fail; volatile ticks *removing_succ; volatile ticks *removing_fail; volatile ticks *putting_count; volatile ticks *putting_count_succ; volatile ticks *getting_count; volatile ticks *getting_count_succ; volatile ticks *removing_count; volatile ticks *removing_count_succ; volatile ticks *total; /* ################################################################### * * LOCALS * ################################################################### */ #ifdef DEBUG extern __thread uint32_t put_num_restarts; extern __thread uint32_t put_num_failed_expand; extern __thread uint32_t put_num_failed_on_new; #endif barrier_t barrier, barrier_global; typedef struct thread_data { uint32_t id; DS_TYPE* set; } thread_data_t; void* test(void* thread) { thread_data_t* td = (thread_data_t*) thread; uint32_t ID = td->id; set_cpu(ID); ssalloc_init(); DS_TYPE* set = td->set; THREAD_INIT(ID); PF_INIT(3, SSPFD_NUM_ENTRIES, ID); #if defined(COMPUTE_LATENCY) volatile ticks my_putting_succ = 0; volatile ticks my_putting_fail = 0; volatile ticks my_getting_succ = 0; volatile ticks my_getting_fail = 0; volatile ticks my_removing_succ = 0; volatile ticks my_removing_fail = 0; #endif uint64_t my_putting_count = 0; uint64_t my_getting_count = 0; uint64_t my_removing_count = 0; uint64_t my_putting_count_succ = 0; uint64_t my_getting_count_succ = 0; uint64_t my_removing_count_succ = 0; #if defined(COMPUTE_LATENCY) && PFD_TYPE == 0 volatile ticks start_acq, end_acq; volatile ticks correction = getticks_correction_calc(); #endif seeds = 
seed_rand(); #if GC == 1 alloc = (ssmem_allocator_t*) malloc(sizeof(ssmem_allocator_t)); assert(alloc != NULL); ssmem_alloc_init_fs_size(alloc, SSMEM_DEFAULT_MEM_SIZE, SSMEM_GC_FREE_SET_SIZE, ID); #endif RR_INIT(phys_id); barrier_cross(&barrier); uint64_t key; int c = 0; uint32_t scale_rem = (uint32_t) (update_rate * UINT_MAX); uint32_t scale_put = (uint32_t) (put_rate * UINT_MAX); int i; uint32_t num_elems_thread = (uint32_t) (initial / num_threads); int32_t missing = (uint32_t) initial - (num_elems_thread * num_threads); if (ID < missing) { num_elems_thread++; } #if INITIALIZE_FROM_ONE == 1 num_elems_thread = (ID == 0) * initial; key = range; #endif for(i = 0; i < num_elems_thread; i++) { key = (my_random(&(seeds[0]), &(seeds[1]), &(seeds[2])) % (rand_max + 1)) + rand_min; if(DS_ADD(set, key, NULL) == false) { i--; } } MEM_BARRIER; barrier_cross(&barrier); if (!ID) { printf("#BEFORE size is: %zu\n", (size_t) DS_SIZE(set)); } barrier_cross(&barrier_global); RR_START_SIMPLE(); while (stop == 0) { TEST_LOOP(NULL); } barrier_cross(&barrier); RR_STOP_SIMPLE(); if (!ID) { size_after = DS_SIZE(set); printf("#AFTER size is: %zu\n", size_after); } barrier_cross(&barrier); #if defined(COMPUTE_LATENCY) putting_succ[ID] += my_putting_succ; putting_fail[ID] += my_putting_fail; getting_succ[ID] += my_getting_succ; getting_fail[ID] += my_getting_fail; removing_succ[ID] += my_removing_succ; removing_fail[ID] += my_removing_fail; #endif putting_count[ID] += my_putting_count; getting_count[ID] += my_getting_count; removing_count[ID]+= my_removing_count; putting_count_succ[ID] += my_putting_count_succ; getting_count_succ[ID] += my_getting_count_succ; removing_count_succ[ID]+= my_removing_count_succ; EXEC_IN_DEC_ID_ORDER(ID, num_threads) { print_latency_stats(ID, SSPFD_NUM_ENTRIES, print_vals_num); } EXEC_IN_DEC_ID_ORDER_END(&barrier); SSPFDTERM(); #if GC == 1 ssmem_term(); free(alloc); #endif THREAD_END(); pthread_exit(NULL); } int main(int argc, char **argv) { set_cpu(0); 
ssalloc_init(); seeds = seed_rand(); struct option long_options[] = { // These options don't set a flag {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'e'}, {"duration", required_argument, NULL, 'd'}, {"initial-size", required_argument, NULL, 'i'}, {"num-threads", required_argument, NULL, 'n'}, {"range", required_argument, NULL, 'r'}, {"update-rate", required_argument, NULL, 'u'}, {"num-buckets", required_argument, NULL, 'b'}, {"print-vals", required_argument, NULL, 'v'}, {"vals-pf", required_argument, NULL, 'f'}, {NULL, 0, NULL, 0} }; int i, c; while(1) { i = 0; c = getopt_long(argc, argv, "hAf:d:i:n:r:s:u:m:el:p:b:v:f:x:", long_options, &i); if(c == -1) break; if(c == 0 && long_options[i].flag == 0) c = long_options[i].val; switch(c) { case 0: /* Flag is automatically set */ break; case 'h': printf("ASCYLIB -- stress test " "\n" "\n" "Usage:\n" " %s [options...]\n" "\n" "Options:\n" " -h, --help\n" " Print this message\n" " -e, --verbose\n" " Be verbose\n" " -d, --duration <int>\n" " Test duration in milliseconds\n" " -i, --initial-size <int>\n" " Number of elements to insert before test\n" " -n, --num-threads <int>\n" " Number of threads\n" " -r, --range <int>\n" " Range of integer values inserted in set\n" " -u, --update-rate <int>\n" " Percentage of update transactions\n" " -p, --put-rate <int>\n" " Percentage of put update transactions (should be less than percentage of updates)\n" " -b, --num-buckets <int>\n" " Number of initial buckets (stronger than -l)\n" " -v, --print-vals <int>\n" " When using detailed profiling, how many values to print.\n" " -f, --val-pf <int>\n" " When using detailed profiling, how many values to keep track of.\n" " -x, --lock-based algorithm (default=1)\n" " Use lock-based algorithm\n" " 1 = lock-coupling,\n" " 2 = lazy algorithm\n" " 3 = Pugh's lazy algorithm\n" , argv[0]); exit(0); case 'd': duration = atoi(optarg); break; case 'e': test_verbose = 1; break; case 'i': initial = atoi(optarg); break; case 'n': 
num_threads = atoi(optarg); break; case 'r': range = atol(optarg); break; case 'u': update = atoi(optarg); break; case 'p': put_explicit = 1; put = atoi(optarg); break; case 'l': load_factor = atoi(optarg); break; case 'v': print_vals_num = atoi(optarg); break; case 'f': pf_vals_num = pow2roundup(atoi(optarg)) - 1; break; case '?': default: printf("Use -h or --help for help\n"); exit(1); } } if (!is_power_of_two(initial)) { size_t initial_pow2 = pow2roundup(initial); printf("** rounding up initial (to make it power of 2): old: %zu / new: %zu\n", initial, initial_pow2); initial = initial_pow2; } if (range < initial) { range = 2 * initial; } printf("## Initial: %zu / Range: %zu / ", initial, range); printf("Sequential \n"); double kb = initial * sizeof(DS_NODE) / 1024.0; double mb = kb / 1024.0; printf("Sizeof initial: %.2f KB = %.2f MB\n", kb, mb); if (!is_power_of_two(range)) { size_t range_pow2 = pow2roundup(range); printf("** rounding up range (to make it power of 2): old: %zu / new: %zu\n", range, range_pow2); range = range_pow2; } if (put > update) { put = update; } update_rate = update / 100.0; if (put_explicit) { put_rate = put / 100.0; } else { put_rate = update_rate / 2; } get_rate = 1 - update_rate; /* printf("num_threads = %u\n", num_threads); */ /* printf("cap: = %u\n", num_buckets); */ /* printf("num elem = %u\n", num_elements); */ /* printf("filing rate= %f\n", filling_rate); */ /* printf("update = %f (putting = %f)\n", update_rate, put_rate); */ rand_max = range - 1; struct timeval start, end; struct timespec timeout; timeout.tv_sec = duration / 1000; timeout.tv_nsec = (duration % 1000) * 1000000; stop = 0; DS_TYPE* set = DS_NEW(); assert(set != NULL); /* Initializes the local data */ putting_succ = (ticks *) calloc(num_threads , sizeof(ticks)); putting_fail = (ticks *) calloc(num_threads , sizeof(ticks)); getting_succ = (ticks *) calloc(num_threads , sizeof(ticks)); getting_fail = (ticks *) calloc(num_threads , sizeof(ticks)); removing_succ = (ticks 
*) calloc(num_threads , sizeof(ticks)); removing_fail = (ticks *) calloc(num_threads , sizeof(ticks)); putting_count = (ticks *) calloc(num_threads , sizeof(ticks)); putting_count_succ = (ticks *) calloc(num_threads , sizeof(ticks)); getting_count = (ticks *) calloc(num_threads , sizeof(ticks)); getting_count_succ = (ticks *) calloc(num_threads , sizeof(ticks)); removing_count = (ticks *) calloc(num_threads , sizeof(ticks)); removing_count_succ = (ticks *) calloc(num_threads , sizeof(ticks)); pthread_t threads[num_threads]; pthread_attr_t attr; int rc; void *status; barrier_init(&barrier_global, num_threads + 1); barrier_init(&barrier, num_threads); /* Initialize and set thread detached attribute */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); thread_data_t* tds = (thread_data_t*) malloc(num_threads * sizeof(thread_data_t)); long t; for(t = 0; t < num_threads; t++) { tds[t].id = t; tds[t].set = set; rc = pthread_create(&threads[t], &attr, test, tds + t); if (rc) { printf("ERROR; return code from pthread_create() is %d\n", rc); exit(-1); } } /* Free attribute and wait for the other threads */ pthread_attr_destroy(&attr); barrier_cross(&barrier_global); gettimeofday(&start, NULL); nanosleep(&timeout, NULL); stop = 1; gettimeofday(&end, NULL); duration = (end.tv_sec * 1000 + end.tv_usec / 1000) - (start.tv_sec * 1000 + start.tv_usec / 1000); for(t = 0; t < num_threads; t++) { rc = pthread_join(threads[t], &status); if (rc) { printf("ERROR; return code from pthread_join() is %d\n", rc); exit(-1); } } free(tds); volatile ticks putting_suc_total = 0; volatile ticks putting_fal_total = 0; volatile ticks getting_suc_total = 0; volatile ticks getting_fal_total = 0; volatile ticks removing_suc_total = 0; volatile ticks removing_fal_total = 0; volatile uint64_t putting_count_total = 0; volatile uint64_t putting_count_total_succ = 0; volatile uint64_t getting_count_total = 0; volatile uint64_t getting_count_total_succ = 0; volatile 
uint64_t removing_count_total = 0; volatile uint64_t removing_count_total_succ = 0; for(t=0; t < num_threads; t++) { if (test_verbose) { printf("Thrd: %3lu : srch: %10zu (%10zu) / insr: %10zu (%10zu) / rems: %10zu (%10zu)\n", t, getting_count[t], getting_count_succ[t], putting_count[t], putting_count_succ[t], removing_count[t], removing_count_succ[t]); } PRINT_OPS_PER_THREAD(); putting_suc_total += putting_succ[t]; putting_fal_total += putting_fail[t]; getting_suc_total += getting_succ[t]; getting_fal_total += getting_fail[t]; removing_suc_total += removing_succ[t]; removing_fal_total += removing_fail[t]; putting_count_total += putting_count[t]; putting_count_total_succ += putting_count_succ[t]; getting_count_total += getting_count[t]; getting_count_total_succ += getting_count_succ[t]; removing_count_total += removing_count[t]; removing_count_total_succ += removing_count_succ[t]; } #if defined(COMPUTE_LATENCY) printf("#thread srch_suc srch_fal insr_suc insr_fal remv_suc remv_fal ## latency (in cycles) \n"); fflush(stdout); long unsigned get_suc = (getting_count_total_succ) ? getting_suc_total / getting_count_total_succ : 0; long unsigned get_fal = (getting_count_total - getting_count_total_succ) ? getting_fal_total / (getting_count_total - getting_count_total_succ) : 0; long unsigned put_suc = putting_count_total_succ ? putting_suc_total / putting_count_total_succ : 0; long unsigned put_fal = (putting_count_total - putting_count_total_succ) ? putting_fal_total / (putting_count_total - putting_count_total_succ) : 0; long unsigned rem_suc = removing_count_total_succ ? removing_suc_total / removing_count_total_succ : 0; long unsigned rem_fal = (removing_count_total - removing_count_total_succ) ? 
removing_fal_total / (removing_count_total - removing_count_total_succ) : 0; printf("%-7zu %-8lu %-8lu %-8lu %-8lu %-8lu %-8lu\n", num_threads, get_suc, get_fal, put_suc, put_fal, rem_suc, rem_fal); #endif #define LLU long long unsigned int int UNUSED pr = (int) (putting_count_total_succ - removing_count_total_succ); if (size_after != (initial + pr)) { printf("// WRONG size. %zu + %d != %zu\n", initial, pr, size_after); } printf(" : %-10s | %-10s | %-11s | %-11s | %s\n", "total", "success", "succ %", "total %", "effective %"); uint64_t total = putting_count_total + getting_count_total + removing_count_total; double putting_perc = 100.0 * (1 - ((double)(total - putting_count_total) / total)); double putting_perc_succ = (1 - (double) (putting_count_total - putting_count_total_succ) / putting_count_total) * 100; double getting_perc = 100.0 * (1 - ((double)(total - getting_count_total) / total)); double getting_perc_succ = (1 - (double) (getting_count_total - getting_count_total_succ) / getting_count_total) * 100; double removing_perc = 100.0 * (1 - ((double)(total - removing_count_total) / total)); double removing_perc_succ = (1 - (double) (removing_count_total - removing_count_total_succ) / removing_count_total) * 100; printf("srch: %-10llu | %-10llu | %10.1f%% | %10.1f%% | \n", (LLU) getting_count_total, (LLU) getting_count_total_succ, getting_perc_succ, getting_perc); printf("insr: %-10llu | %-10llu | %10.1f%% | %10.1f%% | %10.1f%%\n", (LLU) putting_count_total, (LLU) putting_count_total_succ, putting_perc_succ, putting_perc, (putting_perc * putting_perc_succ) / 100); printf("rems: %-10llu | %-10llu | %10.1f%% | %10.1f%% | %10.1f%%\n", (LLU) removing_count_total, (LLU) removing_count_total_succ, removing_perc_succ, removing_perc, (removing_perc * removing_perc_succ) / 100); double throughput = (putting_count_total + getting_count_total + removing_count_total) * 1000.0 / duration; printf("#txs %zu\t(%-10.0f\n", num_threads, throughput); printf("#Mops %.3f\n", 
throughput / 1e6); RR_PRINT_UNPROTECTED(RAPL_PRINT_POW); RR_PRINT_CORRECTED(); pthread_exit(NULL); return 0; }
gpl-2.0
Skiles/aseprite
third_party/freetype/src/pshinter/pshmod.c
6
3530
/***************************************************************************/
/*                                                                         */
/*  pshmod.c                                                               */
/*                                                                         */
/*    FreeType PostScript hinter module implementation (body).             */
/*                                                                         */
/*  Copyright 2001 by                                                      */
/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
/*                                                                         */
/*  This file is part of the FreeType project, and may only be used,       */
/*  modified, and distributed under the terms of the FreeType project      */
/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
/*  this file you indicate that you have read the license and              */
/*  understand and accept it fully.                                        */
/*                                                                         */
/***************************************************************************/


#include <ft2build.h>
#include FT_INTERNAL_OBJECTS_H
#include "pshrec.h"
#include "pshalgo.h"


  /* the PostScript Hinter module structure: the base FT_ModuleRec is    */
  /* followed by the shared hints recorder and the three function tables */
  /* handed out to the Type 1 and Type 2 (CFF) font drivers              */
  typedef struct PS_Hinter_Module_Rec_
  {
    FT_ModuleRec          root;           /* base module object; must be first */
    PS_HintsRec           ps_hints;       /* hints recorder shared by T1/T2    */

    PSH_Globals_FuncsRec  globals_funcs;  /* global (blue-zone) hints funcs    */
    T1_Hints_FuncsRec     t1_funcs;       /* Type 1 charstring hints funcs     */
    T2_Hints_FuncsRec     t2_funcs;       /* Type 2 charstring hints funcs     */

  } PS_Hinter_ModuleRec, *PS_Hinter_Module;


  /* finalize module: detach the recorder from both per-format interfaces */
  /* and release the recorder's internal storage                          */
  FT_CALLBACK_DEF( void )
  ps_hinter_done( PS_Hinter_Module  module )
  {
    module->t1_funcs.hints = NULL;
    module->t2_funcs.hints = NULL;

    ps_hints_done( &module->ps_hints );
  }


  /* initialize module, create hints recorder and the interface */
  FT_CALLBACK_DEF( FT_Error )
  ps_hinter_init( PS_Hinter_Module  module )
  {
    FT_Memory  memory = module->root.memory;


    ps_hints_init( &module->ps_hints, memory );

    psh_globals_funcs_init( &module->globals_funcs );

    /* both the T1 and T2 interfaces record into the same PS_HintsRec */
    t1_hints_funcs_init( &module->t1_funcs );
    module->t1_funcs.hints = (T1_Hints)&module->ps_hints;

    t2_hints_funcs_init( &module->t2_funcs );
    module->t2_funcs.hints = (T2_Hints)&module->ps_hints;

    return 0;  /* initialization cannot fail */
  }


  /* returns global hints interface */
  FT_CALLBACK_DEF( PSH_Globals_Funcs )
  pshinter_get_globals_funcs( FT_Module  module )
  {
    return &((PS_Hinter_Module)module)->globals_funcs;
  }


  /* return Type 1 hints interface */
  FT_CALLBACK_DEF( T1_Hints_Funcs )
  pshinter_get_t1_funcs( FT_Module  module )
  {
    return &((PS_Hinter_Module)module)->t1_funcs;
  }


  /* return Type 2 hints interface */
  FT_CALLBACK_DEF( T2_Hints_Funcs )
  pshinter_get_t2_funcs( FT_Module  module )
  {
    return &((PS_Hinter_Module)module)->t2_funcs;
  }


  /* the accessor vtable font drivers obtain via FT_Get_Module_Interface */
  FT_CALLBACK_DEF( PSHinter_Interface )
  pshinter_interface =
  {
    pshinter_get_globals_funcs,
    pshinter_get_t1_funcs,
    pshinter_get_t2_funcs
  };


  /* module class descriptor registered with the FreeType library; */
  /* field order is fixed by FT_Module_Class                       */
  FT_CALLBACK_TABLE_DEF
  const FT_Module_Class  pshinter_module_class =
  {
    0,                              /* module flags: plain module          */
    sizeof ( PS_Hinter_ModuleRec ), /* size of one module instance         */
    "pshinter",                     /* module name                         */
    0x10000L,                       /* module version 1.0                  */
    0x20000L,                       /* requires FreeType 2.0 or above      */

    &pshinter_interface,            /* module-specific interface */

    (FT_Module_Constructor)ps_hinter_init,
    (FT_Module_Destructor) ps_hinter_done,
    (FT_Module_Requester)  0        /* no additional interface for now */
  };


/* END */
gpl-2.0
xoox/linux-2.6.18_pro500
drivers/scsi/lpfc/lpfc_scsi.c
6
37339
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2006 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_version.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 /* * This routine allocates a scsi buffer, which contains all the necessary * information needed to initiate a SCSI I/O. The non-DMAable buffer region * contains information to build the IOCB. The DMAable region contains * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL * and the BPL BDE is setup in the IOCB. 
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
							&psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		/* no iotag left: unwind both allocations above */
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
		kfree (psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	/* carve the DMA region: FCP CMND, then FCP RSP, then the BPL */
	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
							sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	/* tus.w overlays the size/flags bitfields; swap once after filling */
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

/*
 * Pop one pre-allocated scsi buffer off the HBA's free list.
 * Returns NULL when the pool is exhausted (caller busies the I/O).
 */
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/*
 * Return a scsi buffer to the HBA's free list for reuse.
 */
static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;	/* buffer no longer owns a midlayer command */
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct
ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t i, num_bde = 0; int datadir = scsi_cmnd->sc_data_direction; int dma_error; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. */ bpl += 2; if (scsi_cmnd->use_sg) { /* * The driver stores the segment count returned from pci_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ sgel = (struct scatterlist *)scsi_cmnd->request_buffer; lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, scsi_cmnd->use_sg, datadir); if (lpfc_cmd->seg_cnt == 0) return 1; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { printk(KERN_ERR "%s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d", __FUNCTION__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); dma_unmap_sg(&phba->pcidev->dev, sgel, lpfc_cmd->seg_cnt, datadir); return 1; } /* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single scsi command. Just run through the seg_cnt and format * the bde's. 
*/ for (i = 0; i < lpfc_cmd->seg_cnt; i++) { physaddr = sg_dma_address(sgel); bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl->tus.f.bdeSize = sg_dma_len(sgel); if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = 0; else bpl->tus.f.bdeFlags = BUFF_USE_RCV; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; sgel++; num_bde++; } } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { physaddr = dma_map_single(&phba->pcidev->dev, scsi_cmnd->request_buffer, scsi_cmnd->request_bufflen, datadir); dma_error = dma_mapping_error(physaddr); if (dma_error) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0718 Unable to dma_map_single " "request_buffer: x%x\n", phba->brd_no, dma_error); return 1; } lpfc_cmd->nonsg_phys = physaddr; bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = 0; else bpl->tus.f.bdeFlags = BUFF_USE_RCV; bpl->tus.w = le32_to_cpu(bpl->tus.w); num_bde = 1; bpl++; } /* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that the bdeSize is explicitly * reinitialized since all iocb memory resources are used many times * for transmit, receive, and continuation bpl's. */ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof (struct ulp_bde64)); iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); return 0; } static void lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) { /* * There are only two special cases to consider. (1) the scsi command * requested scatter-gather usage or (2) the scsi command allocated * a request buffer, but did not request use_sg. There is a third * case, but it does not require resource deallocation. 
*/ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, psb->seg_cnt, psb->pCmd->sc_data_direction); } else { if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, psb->pCmd->request_bufflen, psb->pCmd->sc_data_direction); } } } static void lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; struct lpfc_hba *phba = lpfc_cmd->scsi_hba; uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; uint32_t host_status = DID_OK; uint32_t rsplen = 0; /* * If this is a task management command, there is no * scsi packet associated with this lpfc_cmd. The driver * consumes it. */ if (fcpcmd->fcpCntl2) { scsi_status = 0; goto out; } lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0730 FCP command failed: RSP " "Data: x%x x%x x%x x%x x%x x%x\n", phba->brd_no, resp_info, scsi_status, be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspSnsLen), be32_to_cpu(fcprsp->rspRspLen), fcprsp->rspInfo3); if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { host_status = DID_ERROR; goto out; } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); if (snslen > SCSI_SENSE_BUFFERSIZE) snslen = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); } cmnd->resid = 0; if (resp_info & RESID_UNDER) { cmnd->resid = be32_to_cpu(fcprsp->rspResId); lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0716 FCP Read Underrun, expected %d, " "residual %d Data: x%x x%x x%x\n", phba->brd_no, be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, fcpi_parm, cmnd->cmnd[0], cmnd->underflow); /* * The 
cmnd->underflow is the minimum number of bytes that must * be transfered for this command. Provided a sense condition * is not present, make sure the actual amount transferred is at * least the underflow value or fail. */ if (!(resp_info & SNS_LEN_VALID) && (scsi_status == SAM_STAT_GOOD) && (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0717 FCP command x%x residual " "underrun converted to error " "Data: x%x x%x x%x\n", phba->brd_no, cmnd->cmnd[0], cmnd->request_bufflen, cmnd->resid, cmnd->underflow); host_status = DID_ERROR; } } else if (resp_info & RESID_OVER) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0720 FCP command x%x residual " "overrun error. Data: x%x x%x \n", phba->brd_no, cmnd->cmnd[0], cmnd->request_bufflen, cmnd->resid); host_status = DID_ERROR; /* * Check SLI validation that all the transfer was actually done * (fcpi_parm should be zero). Apply check only to reads. */ } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0734 FCP Read Check Error Data: " "x%x x%x x%x x%x\n", phba->brd_no, be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcprsp->rspResId), fcpi_parm, cmnd->cmnd[0]); host_status = DID_ERROR; cmnd->resid = cmnd->request_bufflen; } out: cmnd->result = ScsiResult(host_status, scsi_status); } static void lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_iocbq *pIocbOut) { struct lpfc_scsi_buf *lpfc_cmd = (struct lpfc_scsi_buf *) pIocbIn->context1; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd = lpfc_cmd->pCmd; int result; struct scsi_device *sdev, *tmp_sdev; int depth = 0; lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; lpfc_cmd->status = pIocbOut->iocb.ulpStatus; if (lpfc_cmd->status) { if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && (lpfc_cmd->result & IOERR_DRVR_MASK)) 
lpfc_cmd->status = IOSTAT_DRIVER_REJECT; else if (lpfc_cmd->status >= IOSTAT_CNT) lpfc_cmd->status = IOSTAT_DEFAULT; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0729 FCP cmd x%x failed <%d/%d> status: " "x%x result: x%x Data: x%x x%x\n", phba->brd_no, cmd->cmnd[0], cmd->device->id, cmd->device->lun, lpfc_cmd->status, lpfc_cmd->result, pIocbOut->iocb.ulpContext, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); switch (lpfc_cmd->status) { case IOSTAT_FCP_RSP_ERROR: /* Call FCP RSP handler to determine result */ lpfc_handle_fcp_err(lpfc_cmd); break; case IOSTAT_NPORT_BSY: case IOSTAT_FABRIC_BSY: cmd->result = ScsiResult(DID_BUS_BUSY, 0); break; default: cmd->result = ScsiResult(DID_ERROR, 0); break; } if ((pnode == NULL ) || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY); } else { cmd->result = ScsiResult(DID_OK, 0); } if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { uint32_t *lp = (uint32_t *)cmd->sense_buffer; lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0710 Iodone <%d/%d> cmd %p, error x%x " "SNS x%x x%x Data: x%x x%x\n", phba->brd_no, cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), cmd->retries, cmd->resid); } result = cmd->result; sdev = cmd->device; cmd->scsi_done(cmd); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); return; } if (!result && pnode != NULL && ((jiffies - pnode->last_ramp_up_time) > LPFC_Q_RAMP_UP_INTERVAL * HZ) && ((jiffies - pnode->last_q_full_time) > LPFC_Q_RAMP_UP_INTERVAL * HZ) && (phba->cfg_lun_queue_depth > sdev->queue_depth)) { shost_for_each_device(tmp_sdev, sdev->host) { if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) { if (tmp_sdev->id != sdev->id) continue; if (tmp_sdev->ordered_tags) scsi_adjust_queue_depth(tmp_sdev, MSG_ORDERED_TAG, tmp_sdev->queue_depth+1); else scsi_adjust_queue_depth(tmp_sdev, MSG_SIMPLE_TAG, tmp_sdev->queue_depth+1); pnode->last_ramp_up_time = jiffies; } } } /* 
* Check for queue full. If the lun is reporting queue full, then * back off the lun queue depth to prevent target overloads. */ if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) { pnode->last_q_full_time = jiffies; shost_for_each_device(tmp_sdev, sdev->host) { if (tmp_sdev->id != sdev->id) continue; depth = scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); } /* * The queue depth cannot be lowered any more. * Modify the returned error code to store * the final depth value set by * scsi_track_queue_full. */ if (depth == -1) depth = sdev->host->cmd_per_lun; if (depth) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0711 detected queue full - lun queue depth " " adjusted to %d.\n", phba->brd_no, depth); } } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); } static void lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_nodelist *pnode) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); int datadir = scsi_cmnd->sc_data_direction; lpfc_cmd->fcp_rsp->rspSnsLen = 0; /* clear task management bits */ lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; int_to_scsilun(lpfc_cmd->pCmd->device->lun, &lpfc_cmd->fcp_cmnd->fcp_lun); memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); if (scsi_cmnd->device->tagged_supported) { switch (scsi_cmnd->tag) { case HEAD_OF_QUEUE_TAG: fcp_cmnd->fcpCntl1 = HEAD_OF_Q; break; case ORDERED_QUEUE_TAG: fcp_cmnd->fcpCntl1 = ORDERED_Q; break; default: fcp_cmnd->fcpCntl1 = SIMPLE_Q; break; } } else fcp_cmnd->fcpCntl1 = 0; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. 
*/ if (scsi_cmnd->use_sg) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; iocb_cmd->un.fcpi.fcpi_parm = scsi_cmnd->request_bufflen; fcp_cmnd->fcpCntl3 = READ_DATA; phba->fc4InputRequests++; } } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; iocb_cmd->un.fcpi.fcpi_parm = scsi_cmnd->request_bufflen; fcp_cmnd->fcpCntl3 = READ_DATA; phba->fc4InputRequests++; } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; phba->fc4ControlRequests++; } /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer */ piocbq->iocb.ulpContext = pnode->nlp_rpi; if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) piocbq->iocb.ulpFCP2Rcvy = 1; piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); piocbq->context1 = lpfc_cmd; piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; } static int lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) { struct lpfc_sli *psli; struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *ndlp = rdata->pnode; if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { return 0; } psli = &phba->sli; piocbq = &(lpfc_cmd->cur_iocbq); piocb = &piocbq->iocb; fcp_cmnd = lpfc_cmd->fcp_cmnd; int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun); 
fcp_cmnd->fcpCntl2 = task_mgmt_cmd; piocb->ulpCommand = CMD_FCP_ICMND64_CR; piocb->ulpContext = ndlp->nlp_rpi; if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { piocb->ulpFCP2Rcvy = 1; } piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); /* ulpTimeout is only one byte */ if (lpfc_cmd->timeout > 0xff) { /* * Do not timeout the command at the firmware level. * The driver will provide the timeout mechanism. */ piocb->ulpTimeout = 0; } else { piocb->ulpTimeout = lpfc_cmd->timeout; } return (1); } static int lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, unsigned tgt_id, unsigned int lun, struct lpfc_rport_data *rdata) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, FCP_TARGET_RESET); if (!ret) return FAILED; lpfc_cmd->scsi_hba = phba; iocbq = &lpfc_cmd->cur_iocbq; iocbqrsp = lpfc_sli_get_iocbq(phba); if (!iocbqrsp) return FAILED; /* Issue Target Reset to TGT <num> */ lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0702 Issue Target Reset to TGT %d " "Data: x%x x%x\n", phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); if (ret != IOCB_SUCCESS) { lpfc_cmd->status = IOSTAT_DRIVER_REJECT; ret = FAILED; } else { ret = SUCCESS; lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4]; lpfc_cmd->status = iocbqrsp->iocb.ulpStatus; if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && (lpfc_cmd->result & IOERR_DRVR_MASK)) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; } lpfc_sli_release_iocbq(phba, iocbqrsp); return ret; } const char * lpfc_info(struct Scsi_Host *host) { struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; int len; static char lpfcinfobuf[384]; memset(lpfcinfobuf,0,384); if (phba && phba->pcidev){ strncpy(lpfcinfobuf, phba->ModelDesc, 256); len = strlen(lpfcinfobuf); snprintf(lpfcinfobuf + len, 384-len, " on PCI bus %02x device %02x 
irq %d", phba->pcidev->bus->number, phba->pcidev->devfn, phba->pcidev->irq); len = strlen(lpfcinfobuf); if (phba->Port[0]) { snprintf(lpfcinfobuf + len, 384-len, " port %s", phba->Port); } } return lpfcinfobuf; } static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) { unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } void lpfc_poll_start_timer(struct lpfc_hba * phba) { lpfc_poll_rearm_timer(phba); } void lpfc_poll_timeout(unsigned long ptr) { struct lpfc_hba *phba = (struct lpfc_hba *)ptr; unsigned long iflag; spin_lock_irqsave(phba->host->host_lock, iflag); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_poll_fcp_ring (phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } spin_unlock_irqrestore(phba->host->host_lock, iflag); } static int lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { struct lpfc_hba *phba = (struct lpfc_hba *) cmnd->device->host->hostdata; struct lpfc_sli *psli = &phba->sli; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_scsi_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err; err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; goto out_fail_command; } /* * Catch race where our node has transitioned, but the * transport is still transitioning. */ if (!ndlp) { cmnd->result = ScsiResult(DID_BUS_BUSY, 0); goto out_fail_command; } lpfc_cmd = lpfc_get_scsi_buf (phba); if (lpfc_cmd == NULL) { lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0707 driver's buffer pool is empty, " "IO busied\n", phba->brd_no); goto out_host_busy; } /* * Store the midlayer's command structure for the completion phase * and complete the command initialization. 
*/ lpfc_cmd->pCmd = cmnd; lpfc_cmd->rdata = rdata; lpfc_cmd->timeout = 0; cmnd->host_scribble = (unsigned char *)lpfc_cmd; cmnd->scsi_done = done; err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); if (err) goto out_host_busy_free_buf; lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp); err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) goto out_host_busy_free_buf; if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_poll_fcp_ring(phba); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_poll_rearm_timer(phba); } return 0; out_host_busy_free_buf: lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; out_fail_command: done(cmnd); return 0; } static void lpfc_block_error_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); spin_lock_irq(shost->host_lock); while (rport->port_state == FC_PORTSTATE_BLOCKED) { spin_unlock_irq(shost->host_lock); msleep(1000); spin_lock_irq(shost->host_lock); } spin_unlock_irq(shost->host_lock); return; } static int lpfc_abort_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; unsigned int loop_count = 0; int ret = SUCCESS; lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; BUG_ON(!lpfc_cmd); /* * If pCmd field of the corresponding lpfc_scsi_buf structure * points to a different SCSI command, then the driver has * already completed this command, but the midlayer did not * see the completion before the eh fired. Just return * SUCCESS. 
*/ iocb = &lpfc_cmd->cur_iocbq; if (lpfc_cmd->pCmd != cmnd) goto out; BUG_ON(iocb->context1 != lpfc_cmd); abtsiocb = lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; goto out; } /* * The scsi command can not be in txq and it is in flight because the * pCmd is still pointig at the SCSI command we have to abort. There * is no need to search the txcmplq. Just send an abort to the FW. */ cmd = &iocb->iocb; icmd = &abtsiocb->iocb; icmd->un.acxri.abortType = ABORT_TYPE_ABTS; icmd->un.acxri.abortContextTag = cmd->ulpContext; icmd->un.acxri.abortIoTag = cmd->ulpIoTag; icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; if (phba->hba_state >= LPFC_LINK_UP) icmd->ulpCommand = CMD_ABORT_XRI_CN; else icmd->ulpCommand = CMD_CLOSE_XRI_CN; abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; } if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_poll_fcp_ring (phba); /* Wait for abort to complete */ while (lpfc_cmd->pCmd == cmnd) { if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_poll_fcp_ring (phba); spin_unlock_irq(phba->host->host_lock); schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ); spin_lock_irq(phba->host->host_lock); if (++loop_count > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT) break; } if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0748 abort handler timed out waiting for " "abort to complete: ret %#x, ID %d, LUN %d, " "snum %#lx\n", phba->brd_no, ret, cmnd->device->id, cmnd->device->lun, cmnd->serial_number); } out: lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0749 SCSI Layer I/O Abort Request " "Status x%x ID %d LUN %d snum %#lx\n", phba->brd_no, ret, cmnd->device->id, cmnd->device->lun, cmnd->serial_number); spin_unlock_irq(shost->host_lock); return ret; } static int lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_hba 
*phba = (struct lpfc_hba *)shost->hostdata; struct lpfc_scsi_buf *lpfc_cmd; struct lpfc_iocbq *iocbq, *iocbqrsp; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *pnode = rdata->pnode; uint32_t cmd_result = 0, cmd_status = 0; int ret = FAILED; int cnt, loopcnt; lpfc_block_error_handler(cmnd); spin_lock_irq(shost->host_lock); /* * If target is not in a MAPPED state, delay the reset until * target is rediscovered or nodev timeout expires. */ while ( 1 ) { if (!pnode) break; if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { spin_unlock_irq(phba->host->host_lock); schedule_timeout_uninterruptible(msecs_to_jiffies(500)); spin_lock_irq(phba->host->host_lock); } if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE)) break; } lpfc_cmd = lpfc_get_scsi_buf (phba); if (lpfc_cmd == NULL) goto out; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, FCP_LUN_RESET); if (!ret) goto out_free_scsi_buf; iocbq = &lpfc_cmd->cur_iocbq; /* get a buffer for this IOCB command response */ iocbqrsp = lpfc_sli_get_iocbq(phba); if (iocbqrsp == NULL) goto out_free_scsi_buf; lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0703 Issue LUN Reset to TGT %d LUN %d " "Data: x%x x%x\n", phba->brd_no, cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); if (ret == IOCB_SUCCESS) ret = SUCCESS; cmd_result = iocbqrsp->iocb.un.ulpWord[4]; cmd_status = iocbqrsp->iocb.ulpStatus; lpfc_sli_release_iocbq(phba, iocbqrsp); /* * All outstanding txcmplq I/Os should have been aborted by the device. * Unfortunately, some targets do not abide by this forcing the driver * to double check. 
 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	/*
	 * Poll until the aborted I/Os drain from the FCP ring, dropping the
	 * host lock while sleeping.  Bounded by 2 * cfg_nodev_tmo seconds
	 * (checked in LPFC_RESET_WAIT-second steps).
	 */
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		/* Outstanding I/O could not be flushed; report failure so the
		 * midlayer escalates to the next recovery level. */
		ret = FAILED;
	}

out_free_scsi_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, cmnd->device->id,cmnd->device->lun,
			ret, cmd_status, cmd_result);

out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

/*
 * lpfc_reset_bus_handler - SCSI eh_bus_reset_handler entry point.
 *
 * The driver manages a single bus device, so a "bus reset" is implemented
 * as a target reset issued to every target on the mapped node list.  If
 * any individual target reset fails, FAILED is returned to the midlayer.
 * Runs with the Scsi_Host lock held; the lock is dropped only while
 * sleeping in the flush-wait loop below.
 */
static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0700 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	if (err_count == 0)
		ret = SUCCESS;

	lpfc_release_scsi_buf(phba, lpfc_cmd);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	/* Same bounded drain-wait as the LUN reset path, but host-wide. */
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
		   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
		   phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

/*
 * lpfc_slave_alloc - SCSI slave_alloc entry point.
 *
 * Called when the midlayer discovers a new scsi_device.  Rejects devices
 * whose remote port is not ready, then pre-populates the driver's global
 * scsi buffer pool (bounded by cfg_hba_queue_depth).
 */
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 *
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		/* Pool already at the HBA cap - nothing more to allocate. */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		/* Clamp the request so the pool never exceeds the cap. */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d.  Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			/* Partial allocation is tolerated; keep what we got. */
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

/*
 * lpfc_slave_configure - SCSI slave_configure entry point.
 *
 * Sets the queueing model (TCQ on/off) for the new device and adjusts the
 * remote port's dev_loss_tmo.  When ring polling is enabled, kicks the
 * FCP ring poll once and rearms the poll timer as configured.
 */
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/*
 * lpfc_slave_destroy - SCSI slave_destroy entry point.
 * Only clears the back-pointer; rport data is owned by the FC transport.
 */
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

/* SCSI host template: wires the lpfc entry points into the midlayer. */
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};
gpl-2.0
blueskycoco/rtt-2457
bsp/efm32/Libraries/emlib/src/em_dma.c
6
40649
/***************************************************************************//** * @file * @brief Direct memory access (DMA) module peripheral API * @author Energy Micro AS * @version 3.0.0 ******************************************************************************* * @section License * <b>(C) Copyright 2012 Energy Micro AS, http://www.energymicro.com</b> ******************************************************************************* * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. * * DISCLAIMER OF WARRANTY/LIMITATION OF REMEDIES: Energy Micro AS has no * obligation to support this Software. Energy Micro AS is providing the * Software "AS IS", with no express or implied warranties of any kind, * including, but not limited to, any implied warranties of merchantability * or fitness for any particular purpose or warranties against infringement * of any proprietary rights of a third party. * * Energy Micro AS will not be liable for any consequential, incidental, or * special damages, or any other relief, or for any claim by any third party, * arising from your use of this Software. 
* ******************************************************************************/ #include "em_dma.h" #include "em_cmu.h" #include "em_assert.h" #include "em_bitband.h" /***************************************************************************//** * @addtogroup EM_Library * @{ ******************************************************************************/ /***************************************************************************//** * @addtogroup DMA * @brief Direct Memory Access (DMA) Peripheral API * @details * These DMA access functions provide basic support for the following * types of DMA cycles: * * @li @b Basic, used for transferring data between memory and peripherals. * @li @b Auto-request, used for transferring data between memory locations. * @li @b Ping-pong, used for for continuous transfer of data between memory * and peripherals, automatically toggling between primary and alternate * descriptors. * @li @b Memory @b scatter-gather, used for transferring a number of buffers * between memory locations. * @li @b Peripheral @b scatter-gather, used for transferring a number of * buffers between memory and peripherals. * * A basic understanding of the DMA controller is assumed. Please refer to * the EFM32 reference manual for further details. * * The term 'descriptor' is used as a synonym to the 'channel control data * structure' term. * * In order to use the DMA controller, the initialization function must have * been executed once (normally during system init): * @verbatim * DMA_Init(); * @endverbatim * * Then, normally a user of a DMA channel configures the channel: * @verbatim * DMA_CfgChannel(); * @endverbatim * * The channel configuration only has to be done once, if reusing the channel * for the same purpose later. * * In order to set up a DMA cycle, the primary and/or alternate descriptor * has to be set up as indicated below. 
* * For basic or auto-request cycles, use once on either primary or alternate * descriptor: * @verbatim * DMA_CfgDescr(); * @endverbatim * * For ping-pong cycles, configure both primary or alternate descriptors: * @verbatim * DMA_CfgDescr(); // Primary descriptor config * DMA_CfgDescr(); // Alternate descriptor config * @endverbatim * * For scatter-gather cycles, the alternate descriptor array must be programmed: * @verbatim * // 'n' is the number of scattered buffers * // 'descr' points to the start of the alternate descriptor array * * // Fill in 'cfg' * DMA_CfgDescrScatterGather(descr, 0, cfg); * // Fill in 'cfg' * DMA_CfgDescrScatterGather(descr, 1, cfg); * : * // Fill in 'cfg' * DMA_CfgDescrScatterGather(descr, n - 1, cfg); * @endverbatim * * In many cases, the descriptor configuration only has to be done once, if * re-using the channel for the same type of DMA cycles later. * * In order to activate the DMA cycle, use the respective DMA_Activate...() * function. * * For ping-pong DMA cycles, use DMA_RefreshPingPong() from the callback to * prepare the completed descriptor for reuse. Notice that the refresh must * be done prior to the other active descriptor completes, otherwise the * ping-pong DMA cycle will halt. * @{ ******************************************************************************/ /******************************************************************************* ************************** LOCAL FUNCTIONS ******************************** ******************************************************************************/ /** @cond DO_NOT_INCLUDE_WITH_DOXYGEN */ /***************************************************************************//** * @brief * Prepare descriptor for DMA cycle. * * @details * This function prepares the last pieces of configuration required to start a * DMA cycle. Since the DMA controller itself modifies some parts of the * descriptor during use, those parts need to be refreshed if reusing a * descriptor configuration. 
 * @note
 *   If using this function on a descriptor already activated and in use by the
 *   DMA controller, the behaviour is undefined.
 *
 * @param[in] channel
 *   DMA channel to prepare for DMA cycle.
 *
 * @param[in] cycleCtrl
 *   DMA cycle type to prepare for.
 *
 * @param[in] primary
 *   @li true - prepare primary descriptor
 *   @li false - prepare alternate descriptor
 *
 * @param[in] useBurst
 *   The burst feature is only used on peripherals supporting DMA bursts.
 *   Bursts must not be used if the total length (as given by nMinus1) is
 *   less than the arbitration rate configured for the descriptor. Please
 *   refer to the reference manual for further details on burst usage.
 *
 * @param[in] dst
 *   Address to start location to transfer data to. If NULL, leave setting in
 *   descriptor as is.
 *
 * @param[in] src
 *   Address to start location to transfer data from. If NULL, leave setting in
 *   descriptor as is.
 *
 * @param[in] nMinus1
 *   Number of elements (minus 1) to transfer (<= 1023).
 ******************************************************************************/
static void DMA_Prepare(unsigned int channel,
                        DMA_CycleCtrl_TypeDef cycleCtrl,
                        bool primary,
                        bool useBurst,
                        void *dst,
                        void *src,
                        unsigned int nMinus1)
{
  DMA_DESCRIPTOR_TypeDef *descr;
  DMA_DESCRIPTOR_TypeDef *primDescr;
  DMA_CB_TypeDef *cb;
  uint32_t inc;
  uint32_t chBit;
  uint32_t tmp;

  primDescr = ((DMA_DESCRIPTOR_TypeDef *)(DMA->CTRLBASE)) + channel;

  /* Find descriptor to configure */
  if (primary)
  {
    descr = primDescr;
  }
  else
  {
    descr = ((DMA_DESCRIPTOR_TypeDef *)(DMA->ALTCTRLBASE)) + channel;
  }

  /* If callback defined, update info on whether callback is issued */
  /* for primary or alternate descriptor. Mainly needed for ping-pong */
  /* cycles. Note: the callback reference always lives in the PRIMARY */
  /* descriptor's USER field, even when preparing the alternate one. */
  cb = (DMA_CB_TypeDef *)(primDescr->USER);
  if (cb)
  {
    cb->primary = (uint8_t)primary;
  }

  /* SRCEND/DSTEND hold the address of the LAST element of the transfer; */
  /* 'inc' read back from CTRL is the log2 of the address increment, so */
  /* (nMinus1 << inc) is the byte offset of that last element. */
  if (src)
  {
    inc = (descr->CTRL & _DMA_CTRL_SRC_INC_MASK) >> _DMA_CTRL_SRC_INC_SHIFT;
    if (inc == _DMA_CTRL_SRC_INC_NONE)
    {
      descr->SRCEND = src;
    }
    else
    {
      descr->SRCEND = (void *)((uint32_t)src + (nMinus1 << inc));
    }
  }

  if (dst)
  {
    inc = (descr->CTRL & _DMA_CTRL_DST_INC_MASK) >> _DMA_CTRL_DST_INC_SHIFT;
    if (inc == _DMA_CTRL_DST_INC_NONE)
    {
      descr->DSTEND = dst;
    }
    else
    {
      descr->DSTEND = (void *)((uint32_t)dst + (nMinus1 << inc));
    }
  }

  chBit = 1 << channel;
  if (useBurst)
  {
    DMA->CHUSEBURSTS = chBit;
  }
  else
  {
    DMA->CHUSEBURSTC = chBit;
  }

  /* Select which descriptor (primary/alternate) the controller uses first */
  if (primary)
  {
    DMA->CHALTC = chBit;
  }
  else
  {
    DMA->CHALTS = chBit;
  }

  /* Set cycle control: refresh the transfer count and cycle type, which */
  /* the controller itself clears/decrements during a cycle. */
  tmp  = descr->CTRL & ~(_DMA_CTRL_CYCLE_CTRL_MASK | _DMA_CTRL_N_MINUS_1_MASK);
  tmp |= nMinus1 << _DMA_CTRL_N_MINUS_1_SHIFT;
  tmp |= (uint32_t)cycleCtrl << _DMA_CTRL_CYCLE_CTRL_SHIFT;
  descr->CTRL = tmp;
}

/** @endcond */

/*******************************************************************************
 **************************   INTERRUPT FUNCTIONS   ****************************
 ******************************************************************************/

/***************************************************************************//**
 * @brief
 *   Interrupt handler for DMA cycle completion handling.
 *
 * @details
 *   Clears any pending flags and calls registered callback (if any).
 *
 *   If using the default interrupt vector table setup provided, this function
 *   is automatically placed in the IRQ table due to weak linking. If taking
 *   control over the interrupt vector table in some other way, this interrupt
 *   handler must be installed in order to be able to support callback actions.
******************************************************************************/ void DMA_IRQHandler(void) { DMA_DESCRIPTOR_TypeDef *descr = (DMA_DESCRIPTOR_TypeDef *)(DMA->CTRLBASE); int channel; DMA_CB_TypeDef *cb; uint32_t pending; uint32_t pendingPrio; uint32_t prio; uint32_t primaryCpy; int i; /* Get all pending interrupts */ pending = DMA->IF; /* Check for bus error */ if (pending & DMA_IF_ERR) { /* Loop here to enable the debugger to see what has happened */ while (1) ; } /* Process all pending channel interrupts. First process channels */ /* defined with high priority, then those with default priority. */ prio = DMA->CHPRIS; pendingPrio = pending & prio; for (i = 0; i < 2; i++) { channel = 0; /* Process pending interrupts within high/default priority group */ /* honouring priority within group. */ while (pendingPrio) { if (pendingPrio & 1) { /* Clear pending interrupt prior to invoking callback, in case it */ /* sets up another DMA cycle. */ DMA->IFC = 1 << channel; /* Normally, no point in enabling interrupt without callback, but */ /* check if callback is defined anyway. Callback info is always */ /* located in primary descriptor. */ cb = (DMA_CB_TypeDef *)(descr[channel].USER); if (cb) { /* Toggle next-descriptor indicator always prior to invoking */ /* callback (in case callback reconfigurs something) */ primaryCpy = cb->primary; cb->primary ^= 1; if (cb->cbFunc) { cb->cbFunc(channel, (bool)primaryCpy, cb->userPtr); } } } pendingPrio >>= 1; channel++; } /* On second iteration, process default priority channels */ pendingPrio = pending & ~prio; } } /******************************************************************************* ************************** GLOBAL FUNCTIONS ******************************* ******************************************************************************/ /***************************************************************************//** * @brief * Activate DMA auto-request cycle (used for memory-memory transfers). 
* * @details * Prior to activating the DMA cycle, the channel and descriptor to be used * must have been properly configured. * * @note * If using this function on a channel already activated and in use by the * DMA controller, the behaviour is undefined. * * @param[in] channel * DMA channel to activate DMA cycle for. * * @param[in] primary * @li true - activate using primary descriptor * @li false - activate using alternate descriptor * * @param[in] dst * Address to start location to transfer data to. If NULL, leave setting in * descriptor as is from a previous activation. * * @param[in] src * Address to start location to transfer data from. If NULL, leave setting in * descriptor as is from a previous activation. * * @param[in] nMinus1 * Number of DMA transfer elements (minus 1) to transfer (<= 1023). The * size of the DMA transfer element (1, 2 or 4 bytes) is configured with * DMA_CfgDescr(). ******************************************************************************/ void DMA_ActivateAuto(unsigned int channel, bool primary, void *dst, void *src, unsigned int nMinus1) { uint32_t chBit; EFM_ASSERT(channel < DMA_CHAN_COUNT); EFM_ASSERT(nMinus1 <= (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT)); DMA_Prepare(channel, dmaCycleCtrlAuto, primary, false, dst, src, nMinus1); chBit = 1 << channel; DMA->CHENS = chBit; /* Enable channel */ DMA->CHSWREQ = chBit; /* Activate with SW request */ } /***************************************************************************//** * @brief * Activate DMA basic cycle (used for memory-peripheral transfers). * * @details * Prior to activating the DMA cycle, the channel and descriptor to be used * must have been properly configured. * * @note * If using this function on a channel already activated and in use by the * DMA controller, the behaviour is undefined. * * @param[in] channel * DMA channel to activate DMA cycle for. 
* * @param[in] primary * @li true - activate using primary descriptor * @li false - activate using alternate descriptor * * @param[in] useBurst * The burst feature is only used on peripherals supporting DMA bursts. * Bursts must not be used if the total length (as given by nMinus1) is * less than the arbitration rate configured for the descriptor. Please * refer to the reference manual for further details on burst usage. * * @param[in] dst * Address to start location to transfer data to. If NULL, leave setting in * descriptor as is from a previous activation. * * @param[in] src * Address to start location to transfer data from. If NULL, leave setting in * descriptor as is from a previous activation. * * @param[in] nMinus1 * Number of DMA transfer elements (minus 1) to transfer (<= 1023). The * size of the DMA transfer element (1, 2 or 4 bytes) is configured with * DMA_CfgDescr(). ******************************************************************************/ void DMA_ActivateBasic(unsigned int channel, bool primary, bool useBurst, void *dst, void *src, unsigned int nMinus1) { EFM_ASSERT(channel < DMA_CHAN_COUNT); EFM_ASSERT(nMinus1 <= (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT)); DMA_Prepare(channel, dmaCycleCtrlBasic, primary, useBurst, dst, src, nMinus1); /* Enable channel, request signal is provided by peripheral device */ DMA->CHENS = 1 << channel; } /***************************************************************************//** * @brief * Activate DMA ping-pong cycle (used for memory-peripheral transfers). * * @details * Prior to activating the DMA cycle, the channel and both descriptors must * have been properly configured. The primary descriptor is always the first * descriptor to be used by the DMA controller. * * @note * If using this function on a channel already activated and in use by the * DMA controller, the behaviour is undefined. * * @param[in] channel * DMA channel to activate DMA cycle for. 
* * @param[in] useBurst * The burst feature is only used on peripherals supporting DMA bursts. * Bursts must not be used if the total length (as given by nMinus1) is * less than the arbitration rate configured for the descriptors. Please * refer to the reference manual for further details on burst usage. Notice * that this setting is used for both the primary and alternate descriptors. * * @param[in] primDst * Address to start location to transfer data to, for primary descriptor. * If NULL, leave setting in descriptor as is from a previous activation. * * @param[in] primSrc * Address to start location to transfer data from, for primary descriptor. * If NULL, leave setting in descriptor as is from a previous activation. * * @param[in] primNMinus1 * Number of DMA transfer elements (minus 1) to transfer (<= 1023), for * primary descriptor. The size of the DMA transfer element (1, 2 or 4 bytes) * is configured with DMA_CfgDescr(). * * @param[in] altDst * Address to start location to transfer data to, for alternate descriptor. * If NULL, leave setting in descriptor as is from a previous activation. * * @param[in] altSrc * Address to start location to transfer data from, for alternate descriptor. * If NULL, leave setting in descriptor as is from a previous activation. * * @param[in] altNMinus1 * Number of DMA transfer elements (minus 1) to transfer (<= 1023), for * alternate descriptor. The size of the DMA transfer element (1, 2 or 4 bytes) * is configured with DMA_CfgDescr(). 
******************************************************************************/ void DMA_ActivatePingPong(unsigned int channel, bool useBurst, void *primDst, void *primSrc, unsigned int primNMinus1, void *altDst, void *altSrc, unsigned int altNMinus1) { EFM_ASSERT(channel < DMA_CHAN_COUNT); EFM_ASSERT(primNMinus1 <= (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT)); EFM_ASSERT(altNMinus1 <= (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT)); /* Prepare alternate descriptor first */ DMA_Prepare(channel, dmaCycleCtrlPingPong, false, useBurst, altDst, altSrc, altNMinus1); /* Prepare primary descriptor last in order to start cycle using it */ DMA_Prepare(channel, dmaCycleCtrlPingPong, true, useBurst, primDst, primSrc, primNMinus1); /* Enable channel, request signal is provided by peripheral device */ DMA->CHENS = 1 << channel; } /***************************************************************************//** * @brief * Activate DMA scatter-gather cycle (used for either memory-peripheral * or memory-memory transfers). * * @details * Prior to activating the DMA cycle, the array with alternate descriptors * must have been properly configured. This function can be reused without * reconfiguring the alternate descriptors, as long as @p count is the same. * * @note * If using this function on a channel already activated and in use by the * DMA controller, the behaviour is undefined. * * @param[in] channel * DMA channel to activate DMA cycle for. * * @param[in] useBurst * The burst feature is only used on peripherals supporting DMA bursts * (and thus this parameter is ignored for memory scatter-gather cycles). * This parameter determines if bursts should be enabled during DMA transfers * using the alternate descriptors. Bursts must not be used if the total * length (as given by nMinus1 for the alternate descriptor) is * less than the arbitration rate configured for the descriptor. Please * refer to the reference manual for further details on burst usage. 
 *
 * @param[in,out] altDescr
 *   Pointer to start of array with prepared alternate descriptors. The last
 *   descriptor will have its cycle control type reprogrammed to basic type.
 *
 * @param[in] count
 *   Number of alternate descriptors in @p altDescr array. Maximum number of
 *   alternate descriptors is 256.
 ******************************************************************************/
void DMA_ActivateScatterGather(unsigned int channel,
                               bool useBurst,
                               DMA_DESCRIPTOR_TypeDef *altDescr,
                               unsigned int count)
{
  DMA_DESCRIPTOR_TypeDef *descr;
  DMA_CB_TypeDef *cb;
  uint32_t cycleCtrl;
  uint32_t chBit;

  EFM_ASSERT(channel < DMA_CHAN_COUNT);
  EFM_ASSERT(altDescr);
  EFM_ASSERT(count && (count <= 256));

  /* We have to configure the primary descriptor properly in order to */
  /* transfer one complete alternate descriptor from the alternate */
  /* descriptor table into the actual alternate descriptor. */
  descr = (DMA_DESCRIPTOR_TypeDef *)(DMA->CTRLBASE) + channel;

  /* Set source end address to point to alternate descriptor array. */
  /* Each descriptor is 4 words, hence (count * 4) words - 1 = last word. */
  descr->SRCEND = (uint32_t *)altDescr + (count * 4) - 1;

  /* The destination end address in the primary descriptor MUST point */
  /* to the corresponding alternate descriptor in scatter-gather mode. */
  descr->DSTEND = (uint32_t *)((DMA_DESCRIPTOR_TypeDef *)(DMA->ALTCTRLBASE) +
                               channel + 1) - 1;

  /* The user field of the descriptor is used for callback configuration, */
  /* and already configured when channel is configured. Do not modify it. */

  /* Determine from alternate configuration whether this is a memory or */
  /* peripheral scatter-gather, by looking at the first alternate descriptor. */
  /* Clearing bit 0 maps the 'alternate' encoding (value + 1, as written by */
  /* DMA_CfgDescrScatterGather()) back to the base scatter-gather type. */
  cycleCtrl  = altDescr->CTRL & _DMA_CTRL_CYCLE_CTRL_MASK;
  cycleCtrl &= ~(1 << _DMA_CTRL_CYCLE_CTRL_SHIFT);

  EFM_ASSERT((cycleCtrl == dmaCycleCtrlMemScatterGather) ||
             (cycleCtrl == dmaCycleCtrlPerScatterGather));

  /* Set last alternate descriptor to basic or auto-request cycle type in */
  /* order to have dma_done signal asserted when complete. Otherwise interrupt */
  /* will not be triggered when done. */
  altDescr[count - 1].CTRL &= ~_DMA_CTRL_CYCLE_CTRL_MASK;
  if (cycleCtrl == dmaCycleCtrlMemScatterGather)
  {
    altDescr[count - 1].CTRL |= (uint32_t)dmaCycleCtrlAuto << _DMA_CTRL_CYCLE_CTRL_SHIFT;
  }
  else
  {
    altDescr[count - 1].CTRL |= (uint32_t)dmaCycleCtrlBasic << _DMA_CTRL_CYCLE_CTRL_SHIFT;
  }

  /* If callback defined, update info on whether callback is issued for */
  /* primary or alternate descriptor. Not really useful for scatter-gather, */
  /* but do for consistency. Always set to alternate, since that is the last */
  /* descriptor actually used. */
  cb = (DMA_CB_TypeDef *)(descr->USER);
  if (cb)
  {
    cb->primary = false;
  }

  /* Configure primary descriptor control word: copy 4 words per alternate */
  /* descriptor, word-sized/word-incremented, arbitrating every 4 transfers. */
  descr->CTRL =
    ((uint32_t)dmaDataInc4 << _DMA_CTRL_DST_INC_SHIFT) |
    ((uint32_t)dmaDataSize4 << _DMA_CTRL_DST_SIZE_SHIFT) |
    ((uint32_t)dmaDataInc4 << _DMA_CTRL_SRC_INC_SHIFT) |
    ((uint32_t)dmaDataSize4 << _DMA_CTRL_SRC_SIZE_SHIFT) |
    /* Use same protection scheme as for alternate descriptors */
    (altDescr->CTRL & _DMA_CTRL_SRC_PROT_CTRL_MASK) |
    ((uint32_t)dmaArbitrate4 << _DMA_CTRL_R_POWER_SHIFT) |
    (((count * 4) - 1) << _DMA_CTRL_N_MINUS_1_SHIFT) |
    (((uint32_t)useBurst & 1) << _DMA_CTRL_NEXT_USEBURST_SHIFT) |
    cycleCtrl;

  chBit = 1 << channel;

  /* Start with primary descriptor */
  DMA->CHALTC = chBit;

  /* Enable channel */
  DMA->CHENS = chBit;

  /* Send request if memory scatter-gather, otherwise request signal is */
  /* provided by peripheral. */
  if (cycleCtrl == dmaCycleCtrlMemScatterGather)
  {
    DMA->CHSWREQ = chBit;
  }
}

/***************************************************************************//**
 * @brief
 *   Configure a DMA channel.
 *
 * @details
 *   Configure miscellaneous issues for a DMA channel. This function is typically
 *   used once to setup a channel for a certain type of use.
 *
 * @note
 *   If using this function on a channel already in use by the DMA controller,
 *   the behaviour is undefined.
 *
 * @param[in] channel
 *   DMA channel to configure.
* * @param[in] cfg * Configuration to use. ******************************************************************************/ void DMA_CfgChannel(unsigned int channel, DMA_CfgChannel_TypeDef *cfg) { DMA_DESCRIPTOR_TypeDef *descr; EFM_ASSERT(channel < DMA_CHAN_COUNT); EFM_ASSERT(cfg); /* Always keep callback configuration reference in primary descriptor */ descr = (DMA_DESCRIPTOR_TypeDef *)(DMA->CTRLBASE); descr[channel].USER = (uint32_t)(cfg->cb); /* Set to specified priority for channel */ if (cfg->highPri) { DMA->CHPRIS = 1 << channel; } else { DMA->CHPRIC = 1 << channel; } /* Set DMA signal source select */ DMA->CH[channel].CTRL = cfg->select; /* Enable/disable interrupt as specified */ if (cfg->enableInt) { DMA->IFC = (1 << channel); BITBAND_Peripheral(&(DMA->IEN), channel, 1); } else { BITBAND_Peripheral(&(DMA->IEN), channel, 0); } } /***************************************************************************//** * @brief * Configure DMA descriptor for auto-request, basic or ping-pong DMA cycles. * * @details * This function is used for configuration of a descriptor for the following * DMA cycle types: * * @li auto-request - used for memory/memory transfer * @li basic - used for a peripheral/memory transfer * @li ping-pong - used for a ping-pong based peripheral/memory transfer * style providing time to refresh one descriptor while the other is * in use. * * The DMA cycle is not activated, please see DMA_ActivateAuto(), * DMA_ActivateBasic() or DMA_ActivatePingPong() to activate the DMA cycle. * In many cases, the configuration only has to be done once, and all * subsequent cycles may be activated with the activate function. * * For ping-pong DMA cycles, this function must be used both on the primary * and the alternate descriptor prior to activating the DMA cycle. * * Notice that the DMA channel must also be configured, see DMA_CfgChannel(). 
* * @note * If using this function on a descriptor already activated and in use by * the DMA controller, the behaviour is undefined. * * @param[in] channel * DMA channel to configure for. * * @param[in] primary * @li true - configure primary descriptor * @li false - configure alternate descriptor * * @param[in] cfg * Configuration to use. ******************************************************************************/ void DMA_CfgDescr(unsigned int channel, bool primary, DMA_CfgDescr_TypeDef *cfg) { DMA_DESCRIPTOR_TypeDef *descr; EFM_ASSERT(channel < DMA_CHAN_COUNT); EFM_ASSERT(cfg); /* Find descriptor to configure */ if (primary) { descr = (DMA_DESCRIPTOR_TypeDef *)DMA->CTRLBASE; } else { descr = (DMA_DESCRIPTOR_TypeDef *)DMA->ALTCTRLBASE; } descr += channel; /* Prepare the descriptor */ /* Source/destination end addresses set when started */ descr->CTRL = (cfg->dstInc << _DMA_CTRL_DST_INC_SHIFT) | (cfg->size << _DMA_CTRL_DST_SIZE_SHIFT) | (cfg->srcInc << _DMA_CTRL_SRC_INC_SHIFT) | (cfg->size << _DMA_CTRL_SRC_SIZE_SHIFT) | ((uint32_t)(cfg->hprot) << _DMA_CTRL_SRC_PROT_CTRL_SHIFT) | (cfg->arbRate << _DMA_CTRL_R_POWER_SHIFT) | (0 << _DMA_CTRL_N_MINUS_1_SHIFT) | /* Set when activated */ (0 << _DMA_CTRL_NEXT_USEBURST_SHIFT) | /* Set when activated */ DMA_CTRL_CYCLE_CTRL_INVALID; /* Set when activated */ } #if defined(_EFM32_GIANT_FAMILY) /***************************************************************************//** * @brief Configure DMA channel for Loop mode or 2D transfer. * * @details * For 2D transfer, set cfg->enable to "false", and only configure nMinus1 * to same width as channel descriptor. * * @param[in] channel * DMA channel to configure for. * * @param[in] cfg * Configuration to use. 
******************************************************************************/ void DMA_CfgLoop(unsigned int channel, DMA_CfgLoop_TypeDef *cfg) { EFM_ASSERT(channel <= 1); EFM_ASSERT(cfg->nMinus1 <= 1023); /* Configure LOOP setting */ switch( channel ) { case 0: DMA->LOOP0 = (cfg->enable << _DMA_LOOP0_EN_SHIFT| cfg->nMinus1 << _DMA_LOOP0_WIDTH_SHIFT); break; case 1: DMA->LOOP1 = (cfg->enable << _DMA_LOOP1_EN_SHIFT| cfg->nMinus1 << _DMA_LOOP1_WIDTH_SHIFT); break; } } /***************************************************************************//** * @brief Configure DMA channel 2D transfer properties. * * @param[in] channel * DMA channel to configure for. * * @param[in] cfg * Configuration to use. ******************************************************************************/ void DMA_CfgRect(unsigned int channel, DMA_CfgRect_TypeDef *cfg) { EFM_ASSERT(channel == 0); EFM_ASSERT(cfg->dstStride <= 2047); EFM_ASSERT(cfg->srcStride <= 2047); EFM_ASSERT(cfg->height <= 1023); /* Configure rectangular/2D copy */ DMA->RECT0 = (cfg->dstStride << _DMA_RECT0_DSTSTRIDE_SHIFT| cfg->srcStride << _DMA_RECT0_SRCSTRIDE_SHIFT| cfg->height << _DMA_RECT0_HEIGHT_SHIFT); } #endif /***************************************************************************//** * @brief * Configure an alternate DMA descriptor for use with scatter-gather DMA * cycles. * * @details * In scatter-gather mode, the alternate descriptors are located in one * contiguous memory area. Each of the alternate descriptor must be fully * configured prior to starting the scatter-gather DMA cycle. * * The DMA cycle is not activated by this function, please see * DMA_ActivateScatterGather() to activate the DMA cycle. In some cases, the * alternate configuration only has to be done once, and all subsequent * transfers may be activated with the activate function. * * Notice that the DMA channel must also be configured, see DMA_CfgChannel(). 
 *
 * @param[in] descr
 *   Points to start of memory area holding the alternate descriptors.
 *
 * @param[in] indx
 *   Alternate descriptor index number to configure (numbered from 0).
 *
 * @param[in] cfg
 *   Configuration to use.
 ******************************************************************************/
void DMA_CfgDescrScatterGather(DMA_DESCRIPTOR_TypeDef *descr,
                               unsigned int indx,
                               DMA_CfgDescrSGAlt_TypeDef *cfg)
{
  uint32_t cycleCtrl;

  EFM_ASSERT(descr);
  EFM_ASSERT(cfg);

  /* Point to selected entry in alternate descriptor table */
  descr += indx;

  /* When the source address increments, the end pointer must be nMinus1   */
  /* increments (each of 1 << srcInc bytes) past the start address; with   */
  /* no increment it is the start address itself.                          */
  if (cfg->srcInc == dmaDataIncNone)
  {
    descr->SRCEND = cfg->src;
  }
  else
  {
    descr->SRCEND = (void *)((uint32_t)(cfg->src) +
                             ((uint32_t)(cfg->nMinus1) << cfg->srcInc));
  }

  /* Same end-address computation for the destination side. */
  if (cfg->dstInc == dmaDataIncNone)
  {
    descr->DSTEND = cfg->dst;
  }
  else
  {
    descr->DSTEND = (void *)((uint32_t)(cfg->dst) +
                             ((uint32_t)(cfg->nMinus1) << cfg->dstInc));
  }

  /* User definable part not used */
  descr->USER = 0;

  /* NOTE(review): the +1 appears to select the "alternate descriptor"     */
  /* variant of the scatter-gather cycle control encoding — confirm        */
  /* against the PL230 channel_cfg cycle_ctrl field definition.            */
  if (cfg->peripheral)
  {
    cycleCtrl = (uint32_t)dmaCycleCtrlPerScatterGather + 1;
  }
  else
  {
    cycleCtrl = (uint32_t)dmaCycleCtrlMemScatterGather + 1;
  }

  descr->CTRL = (cfg->dstInc << _DMA_CTRL_DST_INC_SHIFT) |
                (cfg->size << _DMA_CTRL_DST_SIZE_SHIFT) |
                (cfg->srcInc << _DMA_CTRL_SRC_INC_SHIFT) |
                (cfg->size << _DMA_CTRL_SRC_SIZE_SHIFT) |
                ((uint32_t)(cfg->hprot) << _DMA_CTRL_SRC_PROT_CTRL_SHIFT) |
                (cfg->arbRate << _DMA_CTRL_R_POWER_SHIFT) |
                ((uint32_t)(cfg->nMinus1) << _DMA_CTRL_N_MINUS_1_SHIFT) |
                /* Never set next useburst bit, since the descriptor used after the */
                /* alternate descriptor is the primary descriptor which operates on */
                /* memory. If the alternate descriptors need to have useBurst set, this */
                /* done when setting up the primary descriptor, ie when activating. */
                (0 << _DMA_CTRL_NEXT_USEBURST_SHIFT) |
                (cycleCtrl << _DMA_CTRL_CYCLE_CTRL_SHIFT);
}


/***************************************************************************//**
 * @brief
 *   Check if DMA channel is enabled.
 *
 * @details
 *   The DMA channel is disabled when the DMA controller has finished a DMA
 *   cycle.
 *
 * @param[in] channel
 *   DMA channel to check.
 *
 * @return
 *   true if channel is enabled, false if not.
 ******************************************************************************/
bool DMA_ChannelEnabled(unsigned int channel)
{
  EFM_ASSERT(channel < DMA_CHAN_COUNT);
  /* CHENS holds one enable-status bit per channel. */
  return (bool)((DMA->CHENS >> channel) & 1);
}


/***************************************************************************//**
 * @brief
 *   Initializes DMA controller.
 *
 * @details
 *   This function will reset and prepare the DMA controller for use. Although
 *   it may be used several times, it is normally only used during system
 *   init. If reused during normal operation, notice that any ongoing DMA
 *   transfers will be aborted. When completed, the DMA controller is in
 *   an enabled state.
 *
 * @note
 *   Must be invoked before using the DMA controller.
 *
 * @param[in] init
 *   Pointer to a structure containing DMA init information.
 ******************************************************************************/
void DMA_Init(DMA_Init_TypeDef *init)
{
  EFM_ASSERT(init);
  /* Make sure control block is properly aligned: the descriptor table */
  /* base must sit on a 256-byte boundary (checked via the low 8 bits). */
  EFM_ASSERT(!((uint32_t)(init->controlBlock) & (256 - 1)));

  /* Make sure DMA clock is enabled prior to accessing DMA module */
  CMU_ClockEnable(cmuClock_DMA, true);

  /* Make sure DMA controller is set to a known reset state */
  DMA_Reset();

  /* Clear/enable DMA interrupts */
  NVIC_ClearPendingIRQ(DMA_IRQn);
  NVIC_EnableIRQ(DMA_IRQn);

  /* Enable bus error interrupt */
  DMA->IEN = DMA_IEN_ERR;

  /* Set pointer to control block, notice that this ptr must have been */
  /* properly aligned, according to requirements defined in the reference */
  /* manual. */
  DMA->CTRLBASE = (uint32_t)(init->controlBlock);

  /* Configure and enable the DMA controller */
  DMA->CONFIG = ((uint32_t)(init->hprot) << _DMA_CONFIG_CHPROT_SHIFT) |
                DMA_CONFIG_EN;
}


/***************************************************************************//**
 * @brief
 *   Refresh a descriptor used in a DMA ping-pong cycle.
 *
 * @details
 *   During a ping-pong DMA cycle, the DMA controller automatically alternates
 *   between primary and alternate descriptors, when completing use of a
 *   descriptor. While the other descriptor is in use by the DMA controller,
 *   the SW should refresh the completed descriptor. This is typically done from
 *   the callback defined for the ping-pong cycle.
 *
 * @param[in] channel
 *   DMA channel to refresh ping-pong descriptor for.
 *
 * @param[in] primary
 *   @li true - refresh primary descriptor
 *   @li false - refresh alternate descriptor
 *
 * @param[in] useBurst
 *   The burst feature is only used on peripherals supporting DMA bursts.
 *   Bursts must not be used if the total length (as given by nMinus1) is
 *   less than the arbitration rate configured for the descriptor. Please
 *   refer to the reference manual for further details on burst usage.
 *
 * @param[in] dst
 *   Address to start location to transfer data to. If NULL, leave setting in
 *   descriptor as is.
 *
 * @param[in] src
 *   Address to start location to transfer data from. If NULL, leave setting in
 *   descriptor as is.
 *
 * @param[in] nMinus1
 *   Number of DMA transfer elements (minus 1) to transfer (<= 1023). The
 *   size of the DMA transfer element (1, 2 or 4 bytes) is configured with
 *   DMA_CfgDescr().
 *
 * @param[in] stop
 *   Indicate that the DMA ping-pong cycle shall stop @b after completing use
 *   of this descriptor.
 ******************************************************************************/
void DMA_RefreshPingPong(unsigned int channel,
                         bool primary,
                         bool useBurst,
                         void *dst,
                         void *src,
                         unsigned int nMinus1,
                         bool stop)
{
  DMA_CycleCtrl_TypeDef cycleCtrl;
  DMA_DESCRIPTOR_TypeDef *descr;
  uint32_t inc;
  uint32_t chBit;
  uint32_t tmp;

  EFM_ASSERT(channel < DMA_CHAN_COUNT);
  EFM_ASSERT(nMinus1 <= (_DMA_CTRL_N_MINUS_1_MASK >> _DMA_CTRL_N_MINUS_1_SHIFT));

  /* The ping-pong DMA cycle may be stopped by issuing a basic cycle type */
  if (stop)
  {
    cycleCtrl = dmaCycleCtrlBasic;
  }
  else
  {
    cycleCtrl = dmaCycleCtrlPingPong;
  }

  /* Find descriptor to configure */
  if (primary)
  {
    descr = ((DMA_DESCRIPTOR_TypeDef *)(DMA->CTRLBASE)) + channel;
  }
  else
  {
    descr = ((DMA_DESCRIPTOR_TypeDef *)(DMA->ALTCTRLBASE)) + channel;
  }

  /* NULL src/dst means "leave the existing end address in the descriptor". */
  if (src)
  {
    /* Reuse the increment size already programmed into the descriptor to */
    /* compute the new source end address.                                */
    inc = (descr->CTRL & _DMA_CTRL_SRC_INC_MASK) >> _DMA_CTRL_SRC_INC_SHIFT;
    if (inc == _DMA_CTRL_SRC_INC_NONE)
    {
      descr->SRCEND = src;
    }
    else
    {
      descr->SRCEND = (void *)((uint32_t)src + (nMinus1 << inc));
    }
  }

  if (dst)
  {
    inc = (descr->CTRL & _DMA_CTRL_DST_INC_MASK) >> _DMA_CTRL_DST_INC_SHIFT;
    if (inc == _DMA_CTRL_DST_INC_NONE)
    {
      descr->DSTEND = dst;
    }
    else
    {
      descr->DSTEND = (void *)((uint32_t)dst + (nMinus1 << inc));
    }
  }

  /* Set or clear the per-channel useBurst bit via the set/clear registers. */
  chBit = 1 << channel;
  if (useBurst)
  {
    DMA->CHUSEBURSTS = chBit;
  }
  else
  {
    DMA->CHUSEBURSTC = chBit;
  }

  /* Set cycle control: rewrite only the cycle-ctrl and n-minus-1 fields, */
  /* preserving the rest of the descriptor's CTRL configuration.          */
  tmp  = descr->CTRL & ~(_DMA_CTRL_CYCLE_CTRL_MASK | _DMA_CTRL_N_MINUS_1_MASK);
  tmp |= nMinus1 << _DMA_CTRL_N_MINUS_1_SHIFT;
  tmp |= cycleCtrl << _DMA_CTRL_CYCLE_CTRL_SHIFT;
  descr->CTRL = tmp;
}


/***************************************************************************//**
 * @brief
 *   Reset the DMA controller.
 *
 * @details
 *   This functions will disable the DMA controller and set it to a reset
 *   state.
 *
 * @note
 *   Notice that any ongoing transfers will be aborted.
******************************************************************************/ void DMA_Reset(void) { int i; /* Disable DMA interrupts */ NVIC_DisableIRQ(DMA_IRQn); /* Put the DMA controller into a known state, first disabling it. */ DMA->CONFIG = _DMA_CONFIG_RESETVALUE; DMA->CHUSEBURSTC = _DMA_CHUSEBURSTC_MASK; DMA->CHREQMASKC = _DMA_CHREQMASKC_MASK; DMA->CHENC = _DMA_CHENC_MASK; DMA->CHALTC = _DMA_CHALTC_MASK; DMA->CHPRIC = _DMA_CHPRIC_MASK; DMA->ERRORC = DMA_ERRORC_ERRORC; DMA->IEN = _DMA_IEN_RESETVALUE; DMA->IFC = _DMA_IFC_MASK; /* Clear channel control flags */ for (i = 0; i < DMA_CHAN_COUNT; i++) { DMA->CH[i].CTRL = _DMA_CH_CTRL_RESETVALUE; } } /** @} (end addtogroup DMA) */ /** @} (end addtogroup EM_Library) */
gpl-2.0
vasi/b2gui
src/Windows/router/dynsockets.cpp
6
8356
/* * dynsockets.cpp - ip router * * Basilisk II (C) 1997-2008 Christian Bauer * * Windows platform specific code copyright (C) Lauri Pesonen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "sysdeps.h" #include "dynsockets.h" #include "dump.h" #include "main.h" #if DEBUG #pragma optimize("",off) #endif #include "debug.h" /* Cannot link statically to winsock. We need ws2, but there are Win95 b2 users who can't (or won't) upgrade. 
*/ static const char *wslib = "WS2_32.DLL"; static HMODULE hWinsock32 = 0; static WSADATA WSAData; int (WSAAPI *_WSAStartup) (WORD, LPWSADATA) = 0; int (WSAAPI *_WSACleanup) (void) = 0; int (WSAAPI *_gethostname) (char *, int) = 0; char * (WSAAPI *_inet_ntoa) (struct in_addr) = 0; struct hostent * (WSAAPI *_gethostbyname) (const char *) = 0; int (WSAAPI *_send) (SOCKET, const char *, int, int) = 0; int (WSAAPI *_sendto) (SOCKET, const char *, int, int, const struct sockaddr *, int) = 0; int (WSAAPI *_recv) (SOCKET, char *, int, int) = 0; int (WSAAPI *_recvfrom) (SOCKET, char *, int, int, struct sockaddr *, int *) = 0; int (WSAAPI *_listen) (SOCKET, int) = 0; SOCKET (WSAAPI *_accept) (SOCKET, struct sockaddr *, int *) = 0; SOCKET (WSAAPI *_socket) (int, int, int) = 0; int (WSAAPI *_bind) (SOCKET, const struct sockaddr *, int) = 0; int (WSAAPI *_WSAAsyncSelect) (SOCKET, HWND, u_int, long) = 0; int (WSAAPI *_closesocket) (SOCKET) = 0; int (WSAAPI *_getsockname) (SOCKET, struct sockaddr *, int *) = 0; int (WSAAPI *_WSARecvFrom) (SOCKET, LPWSABUF, DWORD, LPDWORD, LPDWORD, struct sockaddr *, LPINT, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE) = 0; int (WSAAPI *_WSAGetLastError) (void) = 0; int (WSAAPI *_WSAConnect) (SOCKET, const struct sockaddr *, int, LPWSABUF, LPWSABUF, LPQOS, LPQOS) = 0; int (WSAAPI *_setsockopt) (SOCKET, int, int, const char *, int) = 0; int (WSAAPI *_WSAEventSelect) (SOCKET, WSAEVENT, long) = 0; WSAEVENT (WSAAPI *_WSACreateEvent) (void) = 0; BOOL (WSAAPI *_WSACloseEvent) (WSAEVENT) = 0; BOOL (WSAAPI *_WSAResetEvent) (WSAEVENT) = 0; int (WSAAPI *_WSAEnumNetworkEvents) (SOCKET, WSAEVENT, LPWSANETWORKEVENTS) = 0; int (WSAAPI *_shutdown) (SOCKET, int) = 0; int (WSAAPI *_WSASend) (SOCKET, LPWSABUF, DWORD, LPDWORD, DWORD, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE) = 0; int (WSAAPI *_WSARecv) (SOCKET, LPWSABUF, DWORD, LPDWORD, LPDWORD, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE) = 0; unsigned long (WSAAPI *_inet_addr) (const 
char *) = 0; static bool load_sockets() { bool result = false; hWinsock32 = LoadLibrary( wslib ); if(!hWinsock32) { ErrorAlert("Could not load Winsock libraries; router module is not available. Please install Windows sockets 2."); } else { _WSAStartup = (int (WSAAPI *)(WORD, LPWSADATA))GetProcAddress( hWinsock32, "WSAStartup" ); _WSACleanup = (int (WSAAPI *)(void))GetProcAddress( hWinsock32, "WSACleanup" ); _gethostname = (int (WSAAPI *)(char *, int))GetProcAddress( hWinsock32, "gethostname" ); _inet_ntoa = (char * (WSAAPI *)(struct in_addr))GetProcAddress( hWinsock32, "inet_ntoa" ); _gethostbyname = (struct hostent * (WSAAPI *)(const char *))GetProcAddress( hWinsock32, "gethostbyname" ); _send = (int (WSAAPI *)(SOCKET, const char *, int, int))GetProcAddress( hWinsock32, "send" ); _sendto = (int (WSAAPI *)(SOCKET, const char *, int, int, const struct sockaddr *, int))GetProcAddress( hWinsock32, "sendto" ); _recv = (int (WSAAPI *)(SOCKET, char *, int, int))GetProcAddress( hWinsock32, "recv" ); _recvfrom = (int (WSAAPI *)(SOCKET, char *, int, int, struct sockaddr *, int *))GetProcAddress( hWinsock32, "recvfrom" ); _listen = (int (WSAAPI *)(SOCKET, int))GetProcAddress( hWinsock32, "listen" ); _accept = (SOCKET (WSAAPI *)(SOCKET, struct sockaddr *, int *))GetProcAddress( hWinsock32, "accept" ); _socket = (SOCKET (WSAAPI *)(int, int, int))GetProcAddress( hWinsock32, "socket" ); _bind = (int (WSAAPI *)(SOCKET, const struct sockaddr *, int))GetProcAddress( hWinsock32, "bind" ); _WSAAsyncSelect = (int (WSAAPI *)(SOCKET, HWND, u_int, long))GetProcAddress( hWinsock32, "WSAAsyncSelect" ); _closesocket = (int (WSAAPI *)(SOCKET))GetProcAddress( hWinsock32, "closesocket" ); _getsockname = (int (WSAAPI *)(SOCKET, struct sockaddr *, int *))GetProcAddress( hWinsock32, "getsockname" ); _WSARecvFrom = (int (WSAAPI *)(SOCKET, LPWSABUF, DWORD, LPDWORD, LPDWORD, struct sockaddr *, LPINT, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE))GetProcAddress( hWinsock32, "WSARecvFrom" ); 
_WSAGetLastError = (int (WSAAPI *)(void))GetProcAddress( hWinsock32, "WSAGetLastError" ); _WSAConnect = (int (WSAAPI *)(SOCKET, const struct sockaddr *, int, LPWSABUF, LPWSABUF, LPQOS, LPQOS))GetProcAddress( hWinsock32, "WSAConnect" ); _setsockopt = (int (WSAAPI *)(SOCKET, int, int, const char *, int))GetProcAddress( hWinsock32, "setsockopt" ); _WSAEventSelect = (int (WSAAPI *)(SOCKET, WSAEVENT, long))GetProcAddress( hWinsock32, "WSAEventSelect" ); _WSACreateEvent = (WSAEVENT (WSAAPI *)(void))GetProcAddress( hWinsock32, "WSACreateEvent" ); _WSACloseEvent = (BOOL (WSAAPI *)(WSAEVENT))GetProcAddress( hWinsock32, "WSACloseEvent" ); _WSAResetEvent = (BOOL (WSAAPI *)(WSAEVENT))GetProcAddress( hWinsock32, "WSAResetEvent" ); _WSAEnumNetworkEvents = (BOOL (WSAAPI *)(SOCKET, WSAEVENT, LPWSANETWORKEVENTS))GetProcAddress( hWinsock32, "WSAEnumNetworkEvents" ); _shutdown = (int (WSAAPI *)(SOCKET, int))GetProcAddress( hWinsock32, "shutdown" ); _WSASend = (int (WSAAPI *)(SOCKET, LPWSABUF, DWORD, LPDWORD, DWORD, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE))GetProcAddress( hWinsock32, "WSASend" ); _WSARecv = (int (WSAAPI *)(SOCKET, LPWSABUF, DWORD, LPDWORD, LPDWORD, LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE))GetProcAddress( hWinsock32, "WSARecv" ); _inet_addr = (unsigned long (WSAAPI *)(const char *))GetProcAddress( hWinsock32, "inet_addr" ); if( _WSAStartup && _WSACleanup && _gethostname && _inet_ntoa && _gethostbyname && _send && _sendto && _recv && _recvfrom && _listen && _accept && _socket && _bind && _WSAAsyncSelect && _closesocket && _getsockname && _WSARecvFrom && _WSAGetLastError && _WSAConnect && _setsockopt && _WSAEventSelect && _WSACreateEvent && _WSACloseEvent && _WSAResetEvent && _WSAEnumNetworkEvents && _shutdown && _WSASend && _WSARecv && _inet_addr ) { result = true; } else { ErrorAlert("Could not find required entry points; router module is not available. 
Please install Windows sockets 2."); } } return result; } bool dynsockets_init(void) { bool result = false; if(load_sockets()) { if( (_WSAStartup(MAKEWORD(2,0), &WSAData)) != 0 || LOBYTE( WSAData.wVersion ) != 2 || HIBYTE( WSAData.wVersion ) != 0 ) { ErrorAlert("Could not start Windows sockets version 2."); } else { result = true; } } return result; } void dynsockets_final(void) { if(hWinsock32) { _WSACleanup(); FreeLibrary( hWinsock32 ); hWinsock32 = 0; } _WSAStartup = 0; _WSACleanup = 0; _gethostname = 0; _inet_ntoa = 0; _gethostbyname = 0; _send = 0; _sendto = 0; _recv = 0; _recvfrom = 0; _listen = 0; _accept = 0; _socket = 0; _bind = 0; _WSAAsyncSelect = 0; _closesocket = 0; _getsockname = 0; _WSARecvFrom = 0; _WSAGetLastError = 0; _WSAConnect = 0; _setsockopt = 0; _WSAEventSelect = 0; _WSACreateEvent = 0; _WSACloseEvent = 0; _WSAResetEvent = 0; _WSAEnumNetworkEvents = 0; _shutdown = 0; _WSASend = 0; _WSARecv = 0; _inet_addr = 0; }
gpl-2.0
OpenInkpot-archive/iplinux-gdb
gdb/testsuite/gdb.threads/execl.c
6
1545
/* This testcase is part of GDB, the GNU debugger.

   Copyright 2008, 2009 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* Test handling thread control across an execl.  */

/* The original image loads a thread library and has several threads,
   while the new image does not load a thread library.  */

#include <unistd.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Worker thread: block forever so the extra threads are alive when the
   main thread performs the execl.  */
void *
thread_function (void *arg)
{
  while (1)
    sleep (100);

  return NULL;
}

int
main (int argc, char* argv[])
{
  pthread_t thread1;
  pthread_t thread2;
  char *new_image;

  pthread_create (&thread1, NULL, thread_function, NULL);
  pthread_create (&thread2, NULL, thread_function, NULL);

  /* The new image is this executable's own path with "1" appended
     (the test harness builds that companion binary).  +2 covers the
     extra character and the NUL terminator.  */
  new_image = malloc (strlen (argv[0]) + 2);
  if (new_image == NULL)
    return 1;		/* Fix: don't strcpy through a NULL pointer.  */
  strcpy (new_image, argv[0]);
  strcat (new_image, "1");

  /* Fix: the variadic execl argument list must be terminated by a null
     POINTER; a bare NULL may expand to an int 0, which is not a valid
     sentinel on ABIs where pointers and ints differ in size.  */
  if (execl (new_image, new_image, (char *) NULL) == -1) /* set breakpoint here */
    return 1;

  return 0;
}
gpl-2.0
Gerhood/TrinityCore
src/server/scripts/Northrend/VaultOfArchavon/boss_toravon.cpp
6
8597
/* * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "InstanceScript.h" #include "ObjectAccessor.h" #include "ScriptedCreature.h" #include "vault_of_archavon.h" enum Spells { // Toravon SPELL_FREEZING_GROUND = 72090, // don't know cd... using 20 secs. SPELL_FROZEN_ORB = 72091, SPELL_WHITEOUT = 72034, // Every 38 sec. cast. (after SPELL_FROZEN_ORB) SPELL_FROZEN_MALLET = 71993, // Frost Warder SPELL_FROST_BLAST = 72123, // don't know cd... using 20 secs. 
SPELL_FROZEN_MALLET_2 = 72122, // Frozen Orb SPELL_FROZEN_ORB_DMG = 72081, // priodic dmg aura SPELL_FROZEN_ORB_AURA = 72067, // make visible // Frozen Orb Stalker SPELL_FROZEN_ORB_SUMMON = 72093, // summon orb }; enum Events { EVENT_FREEZING_GROUND = 1, EVENT_FROZEN_ORB = 2, EVENT_WHITEOUT = 3, EVENT_FROST_BLAST = 4, }; enum Creatures { NPC_FROZEN_ORB = 38456 // 1 in 10 mode and 3 in 25 mode }; class boss_toravon : public CreatureScript { public: boss_toravon() : CreatureScript("boss_toravon") { } struct boss_toravonAI : public BossAI { boss_toravonAI(Creature* creature) : BossAI(creature, DATA_TORAVON) { } void EnterCombat(Unit* /*who*/) override { DoCast(me, SPELL_FROZEN_MALLET); events.ScheduleEvent(EVENT_FROZEN_ORB, 11000); events.ScheduleEvent(EVENT_WHITEOUT, 13000); events.ScheduleEvent(EVENT_FREEZING_GROUND, 15000); _EnterCombat(); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_FROZEN_ORB: me->CastCustomSpell(SPELL_FROZEN_ORB, SPELLVALUE_MAX_TARGETS, 1, me); events.ScheduleEvent(EVENT_FROZEN_ORB, 38000); break; case EVENT_WHITEOUT: DoCast(me, SPELL_WHITEOUT); events.ScheduleEvent(EVENT_WHITEOUT, 38000); break; case EVENT_FREEZING_GROUND: if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 1)) DoCast(target, SPELL_FREEZING_GROUND); events.ScheduleEvent(EVENT_FREEZING_GROUND, 20000); break; default: break; } if (me->HasUnitState(UNIT_STATE_CASTING)) return; } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const override { return GetVaultOfArchavonAI<boss_toravonAI>(creature); } }; /*###### ## Mob Frost Warder ######*/ class npc_frost_warder : public CreatureScript { public: npc_frost_warder() : CreatureScript("npc_frost_warder") { } struct npc_frost_warderAI : public ScriptedAI { npc_frost_warderAI(Creature* creature) : ScriptedAI(creature) { } void Reset() override { 
events.Reset(); } void EnterCombat(Unit* /*who*/) override { DoZoneInCombat(); DoCast(me, SPELL_FROZEN_MALLET_2); events.ScheduleEvent(EVENT_FROST_BLAST, 5000); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; if (events.ExecuteEvent() == EVENT_FROST_BLAST) { DoCastVictim(SPELL_FROST_BLAST); events.ScheduleEvent(EVENT_FROST_BLAST, 20000); } DoMeleeAttackIfReady(); } private: EventMap events; }; CreatureAI* GetAI(Creature* creature) const override { return GetVaultOfArchavonAI<npc_frost_warderAI>(creature); } }; /*###### ## Mob Frozen Orb ######*/ class npc_frozen_orb : public CreatureScript { public: npc_frozen_orb() : CreatureScript("npc_frozen_orb") { } struct npc_frozen_orbAI : public ScriptedAI { npc_frozen_orbAI(Creature* creature) : ScriptedAI(creature) { Initialize(); } void Initialize() { done = false; killTimer = 60000; // if after this time there is no victim -> destroy! } void Reset() override { Initialize(); } void EnterCombat(Unit* /*who*/) override { DoZoneInCombat(); } void UpdateAI(uint32 diff) override { if (!done) { DoCast(me, SPELL_FROZEN_ORB_AURA, true); DoCast(me, SPELL_FROZEN_ORB_DMG, true); done = true; } if (killTimer <= diff) { if (!UpdateVictim()) me->DespawnOrUnsummon(); killTimer = 10000; } else killTimer -= diff; } private: uint32 killTimer; bool done; }; CreatureAI* GetAI(Creature* creature) const override { return GetVaultOfArchavonAI<npc_frozen_orbAI>(creature); } }; /*###### ## Mob Frozen Orb Stalker ######*/ class npc_frozen_orb_stalker : public CreatureScript { public: npc_frozen_orb_stalker() : CreatureScript("npc_frozen_orb_stalker") { } struct npc_frozen_orb_stalkerAI : public ScriptedAI { npc_frozen_orb_stalkerAI(Creature* creature) : ScriptedAI(creature) { creature->SetVisible(false); creature->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE | UNIT_FLAG_NON_ATTACKABLE); me->SetControlled(true, UNIT_STATE_ROOT); 
creature->SetReactState(REACT_PASSIVE); instance = creature->GetInstanceScript(); spawned = false; SetCombatMovement(false); } void UpdateAI(uint32 /*diff*/) override { if (spawned) return; spawned = true; Unit* toravon = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_TORAVON)); if (!toravon) return; uint8 num_orbs = RAID_MODE(1, 3); for (uint8 i = 0; i < num_orbs; ++i) { Position pos; me->GetNearPoint(toravon, pos.m_positionX, pos.m_positionY, pos.m_positionZ, 0.0f, 10.0f, 0.0f); me->UpdatePosition(pos); DoCast(me, SPELL_FROZEN_ORB_SUMMON); } } private: InstanceScript* instance; bool spawned; }; CreatureAI* GetAI(Creature* creature) const override { return GetVaultOfArchavonAI<npc_frozen_orb_stalkerAI>(creature); } }; void AddSC_boss_toravon() { new boss_toravon(); new npc_frost_warder(); new npc_frozen_orb(); new npc_frozen_orb_stalker(); }
gpl-2.0
Perferom/android_kernel_htc_msm7x27
arch/powerpc/platforms/powermac/feature.c
774
82198
/* * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au) * Ben. Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * TODO: * * - Replace mdelay with some schedule loop if possible * - Shorten some obfuscated delays on some routines (like modem * power) * - Refcount some clocks (see darwin) * - Split split split... * */ #include <linux/types.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/sections.h> #include <asm/errno.h> #include <asm/ohare.h> #include <asm/heathrow.h> #include <asm/keylargo.h> #include <asm/uninorth.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/dbdma.h> #include <asm/pci-bridge.h> #include <asm/pmac_low_i2c.h> #undef DEBUG_FEATURE #ifdef DEBUG_FEATURE #define DBG(fmt...) printk(KERN_DEBUG fmt) #else #define DBG(fmt...) #endif #ifdef CONFIG_6xx extern int powersave_lowspeed; #endif extern int powersave_nap; extern struct device_node *k2_skiplist[2]; /* * We use a single global lock to protect accesses. 
Each driver has * to take care of its own locking */ DEFINE_RAW_SPINLOCK(feature_lock); #define LOCK(flags) raw_spin_lock_irqsave(&feature_lock, flags); #define UNLOCK(flags) raw_spin_unlock_irqrestore(&feature_lock, flags); /* * Instance of some macio stuffs */ struct macio_chip macio_chips[MAX_MACIO_CHIPS]; struct macio_chip *macio_find(struct device_node *child, int type) { while(child) { int i; for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++) if (child == macio_chips[i].of_node && (!type || macio_chips[i].type == type)) return &macio_chips[i]; child = child->parent; } return NULL; } EXPORT_SYMBOL_GPL(macio_find); static const char *macio_names[] = { "Unknown", "Grand Central", "OHare", "OHareII", "Heathrow", "Gatwick", "Paddington", "Keylargo", "Pangea", "Intrepid", "K2", "Shasta", }; struct device_node *uninorth_node; u32 __iomem *uninorth_base; static u32 uninorth_rev; static int uninorth_maj; static void __iomem *u3_ht_base; /* * For each motherboard family, we have a table of functions pointers * that handle the various features. 
*/ typedef long (*feature_call)(struct device_node *node, long param, long value); struct feature_table_entry { unsigned int selector; feature_call function; }; struct pmac_mb_def { const char* model_string; const char* model_name; int model_id; struct feature_table_entry* features; unsigned long board_flags; }; static struct pmac_mb_def pmac_mb; /* * Here are the chip specific feature functions */ static inline int simple_feature_tweak(struct device_node *node, int type, int reg, u32 mask, int value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, type); if (!macio) return -ENODEV; LOCK(flags); if (value) MACIO_BIS(reg, mask); else MACIO_BIC(reg, mask); (void)MACIO_IN32(reg); UNLOCK(flags); return 0; } #ifndef CONFIG_POWER4 static long ohare_htw_scc_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long chan_mask; unsigned long fcr; unsigned long flags; int htw, trans; unsigned long rmask; macio = macio_find(node, 0); if (!macio) return -ENODEV; if (!strcmp(node->name, "ch-a")) chan_mask = MACIO_FLAG_SCCA_ON; else if (!strcmp(node->name, "ch-b")) chan_mask = MACIO_FLAG_SCCB_ON; else return -ENODEV; htw = (macio->type == macio_heathrow || macio->type == macio_paddington || macio->type == macio_gatwick); /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */ trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE && pmac_mb.model_id != PMAC_TYPE_YIKES); if (value) { #ifdef CONFIG_ADB_PMU if ((param & 0xfff) == PMAC_SCC_IRDA) pmu_enable_irled(1); #endif /* CONFIG_ADB_PMU */ LOCK(flags); fcr = MACIO_IN32(OHARE_FCR); /* Check if scc cell need enabling */ if (!(fcr & OH_SCC_ENABLE)) { fcr |= OH_SCC_ENABLE; if (htw) { /* Side effect: this will also power up the * modem, but it's too messy to figure out on which * ports this controls the tranceiver and on which * it controls the modem */ if (trans) fcr &= ~HRW_SCC_TRANS_EN_N; MACIO_OUT32(OHARE_FCR, fcr); fcr |= (rmask = HRW_RESET_SCC); 
MACIO_OUT32(OHARE_FCR, fcr); } else { fcr |= (rmask = OH_SCC_RESET); MACIO_OUT32(OHARE_FCR, fcr); } UNLOCK(flags); (void)MACIO_IN32(OHARE_FCR); mdelay(15); LOCK(flags); fcr &= ~rmask; MACIO_OUT32(OHARE_FCR, fcr); } if (chan_mask & MACIO_FLAG_SCCA_ON) fcr |= OH_SCCA_IO; if (chan_mask & MACIO_FLAG_SCCB_ON) fcr |= OH_SCCB_IO; MACIO_OUT32(OHARE_FCR, fcr); macio->flags |= chan_mask; UNLOCK(flags); if (param & PMAC_SCC_FLAG_XMON) macio->flags |= MACIO_FLAG_SCC_LOCKED; } else { if (macio->flags & MACIO_FLAG_SCC_LOCKED) return -EPERM; LOCK(flags); fcr = MACIO_IN32(OHARE_FCR); if (chan_mask & MACIO_FLAG_SCCA_ON) fcr &= ~OH_SCCA_IO; if (chan_mask & MACIO_FLAG_SCCB_ON) fcr &= ~OH_SCCB_IO; MACIO_OUT32(OHARE_FCR, fcr); if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) { fcr &= ~OH_SCC_ENABLE; if (htw && trans) fcr |= HRW_SCC_TRANS_EN_N; MACIO_OUT32(OHARE_FCR, fcr); } macio->flags &= ~(chan_mask); UNLOCK(flags); mdelay(10); #ifdef CONFIG_ADB_PMU if ((param & 0xfff) == PMAC_SCC_IRDA) pmu_enable_irled(0); #endif /* CONFIG_ADB_PMU */ } return 0; } static long ohare_floppy_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_FLOPPY_ENABLE, value); } static long ohare_mesh_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_MESH_ENABLE, value); } static long ohare_ide_enable(struct device_node *node, long param, long value) { switch(param) { case 0: /* For some reason, setting the bit in set_initial_features() * doesn't stick. I'm still investigating... --BenH. 
*/ if (value) simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IOBUS_ENABLE, 1); return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE0_ENABLE, value); case 1: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_BAY_IDE_ENABLE, value); default: return -ENODEV; } } static long ohare_ide_reset(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE0_RESET_N, !value); case 1: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE1_RESET_N, !value); default: return -ENODEV; } } static long ohare_sleep_state(struct device_node *node, long param, long value) { struct macio_chip* macio = &macio_chips[0]; if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) return -EPERM; if (value == 1) { MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE); } else if (value == 0) { MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE); } return 0; } static long heathrow_modem_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; u8 gpio; unsigned long flags; macio = macio_find(node, macio_unknown); if (!macio) return -ENODEV; gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1; if (!value) { LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio); UNLOCK(flags); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); mdelay(250); } if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE && pmac_mb.model_id != PMAC_TYPE_YIKES) { LOCK(flags); if (value) MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N); else MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(250); } if (value) { LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); } return 0; } static long 
heathrow_floppy_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE, value); } static long heathrow_mesh_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, macio_unknown); if (!macio) return -ENODEV; LOCK(flags); /* Set clear mesh cell enable */ if (value) MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE); else MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE); (void)MACIO_IN32(HEATHROW_FCR); udelay(10); /* Set/Clear termination power */ if (value) MACIO_BIC(HEATHROW_MBCR, 0x04000000); else MACIO_BIS(HEATHROW_MBCR, 0x04000000); (void)MACIO_IN32(HEATHROW_MBCR); udelay(10); UNLOCK(flags); return 0; } static long heathrow_ide_enable(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE0_ENABLE, value); case 1: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value); default: return -ENODEV; } } static long heathrow_ide_reset(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE0_RESET_N, !value); case 1: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE1_RESET_N, !value); default: return -ENODEV; } } static long heathrow_bmac_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, 0); if (!macio) return -ENODEV; if (value) { LOCK(flags); MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE); MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(10); LOCK(flags); MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(10); } else { LOCK(flags); MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE); UNLOCK(flags); } return 0; } static 
/* Power/clock the Heathrow sound cell (skipped on models where it breaks). */
long heathrow_sound_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	/* B&W G3 and Yikes don't support that properly (the
	 * sound appear to never come back after beeing shut down).
	 */
	if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
	    pmac_mb.model_id == PMAC_TYPE_YIKES)
		return 0;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (value) {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);	/* power bit is active-low */
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
	} else {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		UNLOCK(flags);
	}
	return 0;
}

/* State saved across sleep: FCRs, MBCR and the 13 DBDMA channel configs
 * (plus an alternate set for the secondary/Gatwick macio).
 */
static u32 save_fcr[6];
static u32 save_mbcr;
static struct dbdma_regs save_dbdma[13];
static struct dbdma_regs save_alt_dbdma[13];

/* Snapshot the config registers of the 13 DBDMA channels into 'save'. */
static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Save state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan =
			(void __iomem *)(macio->base + ((0x8000+i*0x100)>>2));
		save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
		save[i].cmdptr = in_le32(&chan->cmdptr);
		save[i].intr_sel = in_le32(&chan->intr_sel);
		save[i].br_sel = in_le32(&chan->br_sel);
		save[i].wait_sel = in_le32(&chan->wait_sel);
	}
}

/* Stop each DBDMA channel, wait for it to go idle, then restore its config. */
static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Save state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan =
			(void __iomem *)(macio->base + ((0x8000+i*0x100)>>2));
		/* clear all control bits, then spin until the channel is inactive */
		out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
		while (in_le32(&chan->status) & ACTIVE)
			mb();
		out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
		out_le32(&chan->cmdptr, save[i].cmdptr);
		out_le32(&chan->intr_sel, save[i].intr_sel);
		out_le32(&chan->br_sel, save[i].br_sel);
		out_le32(&chan->wait_sel, save[i].wait_sel);
	}
}

/* Shut a Heathrow chip down for sleep; 'secondary' selects the Gatwick path. */
static void heathrow_sleep(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		dbdma_save(macio, save_alt_dbdma);
		/* 0x38/0x3c are raw FCR offsets on the secondary macio */
		save_fcr[2] = MACIO_IN32(0x38);
		save_fcr[3] = MACIO_IN32(0x3c);
	} else {
		dbdma_save(macio, save_dbdma);
		save_fcr[0] = MACIO_IN32(0x38);
		save_fcr[1] = MACIO_IN32(0x3c);
		save_mbcr = MACIO_IN32(0x34);
		/* Make sure sound is shut down */
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		/* This seems to be necessary as well or the fan
		 * keeps coming up and battery drains fast
		 */
		MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
		/* Make sure eth is down even if module or sleep
		 * won't work properly
		 */
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
	}
	/* Make sure modem is shut down */
	MACIO_OUT8(HRW_GPIO_MODEM_RESET,
		MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
	MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
	MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);

	/* Let things settle */
	(void)MACIO_IN32(HEATHROW_FCR);
}

/* Restore the FCRs saved by heathrow_sleep() and bring DBDMA back. */
static void heathrow_wakeup(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		MACIO_OUT32(0x38, save_fcr[2]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[3]);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_alt_dbdma);
	} else {
		/* re-enable the IO bus on top of the saved bits */
		MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[1]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x34, save_mbcr);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_dbdma);
	}
}

/* Sleep-state feature entry: value==1 sleeps, value==0 wakes.
 * The secondary (Gatwick) chip is put down first and woken last.
 */
static long heathrow_sleep_state(struct device_node *node, long param, long value)
{
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;
	if (value == 1) {
		if (macio_chips[1].type == macio_gatwick)
			heathrow_sleep(&macio_chips[0], 1);
		heathrow_sleep(&macio_chips[0], 0);
	} else if (value == 0) {
		heathrow_wakeup(&macio_chips[0], 0);
		if (macio_chips[1].type == macio_gatwick)
			heathrow_wakeup(&macio_chips[0], 1);
	}
	return 0;
}

/* Enable/disable one SCC channel (serial/IrDA/I2S modem) on Core99 macios. */
static long core99_scc_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	unsigned long chan_mask;
	u32 fcr;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* channel is selected by the OF node name */
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	if (value) {
		int need_reset_scc = 0;
		int need_reset_irda = 0;

		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		/* Check if scc cell need enabling */
		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
			fcr |= KL0_SCC_CELL_ENABLE;
			need_reset_scc = 1;
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON) {
			fcr |= KL0_SCCA_ENABLE;
			/* Don't enable line drivers for I2S modem */
			if ((param & 0xfff) == PMAC_SCC_I2S1)
				fcr &= ~KL0_SCC_A_INTF_ENABLE;
			else
				fcr |= KL0_SCC_A_INTF_ENABLE;
		}
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr |= KL0_SCCB_ENABLE;
			/* Perform irda specific inits */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_SCC_B_INTF_ENABLE;
				fcr |= KL0_IRDA_ENABLE;
				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
				fcr |= KL0_IRDA_SOURCE1_SEL;
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
				need_reset_irda = 1;
			} else
				fcr |= KL0_SCC_B_INTF_ENABLE;
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		macio->flags |= chan_mask;
		/* Pulse the SCC reset if we just powered the cell up;
		 * the lock is dropped around the 15ms settle delay.
		 */
		if (need_reset_scc) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
		}
		if (need_reset_irda) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
		}
		UNLOCK(flags);
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		/* xmon owns the channel: refuse to shut it down */
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~KL0_SCCA_ENABLE;
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr &= ~KL0_SCCB_ENABLE;
			/* Perform irda specific clears */
			if ((param & 0xfff) ==
			    PMAC_SCC_IRDA) {
				fcr &= ~KL0_IRDA_ENABLE;
				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
			}
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		/* power the whole SCC cell down once both channels are off */
		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
			fcr &= ~KL0_SCC_CELL_ENABLE;
			MACIO_OUT32(KEYLARGO_FCR0, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
	}
	return 0;
}

/* Power the internal modem on KeyLargo, sequencing its reset GPIO. */
static long core99_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_keylargo)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* base GPIO value: output enabled, data low (reset asserted) */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
	}
	if (value) {
		/* release / assert / release reset, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
	}
	return 0;
}

/* Same modem power sequence, Pangea/Intrepid variant (uses a power GPIO
 * instead of the FCR2 alt-data-out bit).
 */
static long pangea_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_pangea &&
		    macio_chips[0].type != macio_intrepid)
			return -ENODEV;
		node =
		    macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* base GPIO value: output enabled, data low (reset asserted) */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
		UNLOCK(flags);
	}
	if (value) {
		/* release / assert / release reset, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
	}
	return 0;
}

/* Gate the UniNorth ATA/100 clock and (re)enable the PCI cell behind it. */
static long core99_ata100_enable(struct device_node *node, long value)
{
	unsigned long flags;
	struct pci_dev *pdev = NULL;
	u8 pbus, pid;
	int rc;

	if (uninorth_rev < 0x24)
		return -ENODEV;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	if (value) {
		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
			pdev = pci_get_bus_and_slot(pbus, pid);
		if (pdev == NULL)
			return 0;
		rc = pci_enable_device(pdev);
		if (rc == 0)
			pci_set_master(pdev);
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}
	return 0;
}

/* Enable one IDE bus: 0-2 are KeyLargo cells, 3 is UniNorth ATA/100. */
static long core99_ide_enable(struct device_node *node, long param, long value)
{
	/* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
	 * based ata-100
	 */
	switch(param) {
	case 0:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
	case 1:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1,
			KL1_EIDE1_ENABLE, value);
	case 2:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
	case 3:
		return core99_ata100_enable(node, value);
	default:
		return -ENODEV;
	}
}

/* Assert/deassert IDE reset; reset bits are active-low, hence !value. */
static long core99_ide_reset(struct device_node *node, long param, long value)
{
	switch(param) {
	case 0:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
	case 1:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
	case 2:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
	default:
		return -ENODEV;
	}
}

/* Gate the GMAC ethernet clock in the UniNorth clock control register. */
static long core99_gmac_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	return 0;
}

/* Pulse the ethernet PHY reset GPIO (10ms low, then released). */
static long core99_gmac_phy_reset(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
	UNLOCK(flags);
	mdelay(10);
	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
		KEYLARGO_GPIO_OUTOUT_DATA);
	UNLOCK(flags);
	mdelay(10);

	return 0;
}

/* Toggle the sound-chip power GPIO on models known to support it. */
static long core99_sound_chip_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Do a better probe code, screamer G4 desktops &
	 * iMacs can do that too, add a recalibrate  in
	 * the driver as well
	 */
	if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
	    pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
		LOCK(flags);
		if (value)
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				KEYLARGO_GPIO_OUTPUT_ENABLE |
				KEYLARGO_GPIO_OUTOUT_DATA);
		else
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				KEYLARGO_GPIO_OUTPUT_ENABLE);
		(void)MACIO_IN8(KL_GPIO_SOUND_POWER);
		UNLOCK(flags);
	}
	return 0;
}

/* Power the AirPort wireless card slot up/down, tracking the current
 * state in macio->flags so the sequence only runs on a real transition.
 */
static long core99_airport_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	int state;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Hint: we allow passing of macio itself for the sake of the
	 * sleep code
	 */
	if (node != macio->of_node &&
	    (!node->parent || node->parent != macio->of_node))
		return -ENODEV;
	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
	if (value == state)
		return 0;
	if (value) {
		/* This code is a reproduction of OF enable-cardslot
		 * and init-wireless methods, slightly hacked until
		 * I got it working.
		 */
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);

		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
		UNLOCK(flags);
		udelay(10);
		MACIO_OUT32(0x1c000, 0);
		mdelay(1);
		MACIO_OUT8(0x1a3e0, 0x41);
		(void)MACIO_IN8(0x1a3e0);
		udelay(10);
		LOCK(flags);
		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		UNLOCK(flags);
		mdelay(100);

		macio->flags |= MACIO_FLAG_AIRPORT_ON;
	} else {
		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
		UNLOCK(flags);

		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
	}
	return 0;
}

#ifdef CONFIG_SMP
/* Pulse the soft-reset GPIO of CPU 'param'; the GPIO is looked up from
 * the device tree ("soft-reset" property), falling back to fixed lines.
 */
static long core99_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;
	struct device_node *cpus;
	const int dflt_reset_lines[] = {	KL_GPIO_RESET_CPU0,
						KL_GPIO_RESET_CPU1,
						KL_GPIO_RESET_CPU2,
						KL_GPIO_RESET_CPU3 };

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo)
		return -ENODEV;

	cpus = of_find_node_by_path("/cpus");
	if (cpus == NULL)
		return -ENODEV;
	for (np = cpus->child; np != NULL; np = np->sibling) {
		const u32 *num = of_get_property(np, "reg", NULL);
		const u32 *rst = of_get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	of_node_put(cpus);
	if (np == NULL || reset_io == 0)
		reset_io = dflt_reset_lines[param];

	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP */

/* Power one of the three USB cells up or down; the cell is identified by
 * its "AAPL,clock-id" property and mapped to number 0/2/4.
 */
static long core99_usb_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio;
	unsigned long flags;
	const char *prop;
	int number;
	u32 reg;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	prop = of_get_property(node, "AAPL,clock-id", NULL);
	if (!prop)
		return -ENODEV;
	if (strncmp(prop, "usb0u048", 8) == 0)
		number = 0;
	else if (strncmp(prop, "usb1u148", 8) == 0)
		number = 2;
	else if (strncmp(prop, "usb2u248", 8) == 0)
		number = 4;
	else
		return -ENODEV;

	/* Sorry for the brute-force locking, but this is only used during
	 * sleep and the timing seem to be critical
	 */
	LOCK(flags);
	if (value) {
		/* Turn ON */
		/* un-suspend the pads, wait 1ms, then enable the cell */
		if (number == 0) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 |
				KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
		} else if (number == 2) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 |
				KL0_USB1_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
		} else if (number == 4) {
			MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 |
				KL1_USB2_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR1);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
		}
		/* clear the wakeup-enable bits for both ports of the cell */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number) |
				KL4_PORT_RESUME_WAKE_EN(number) |
				KL4_PORT_CONNECT_WAKE_EN(number) |
				KL4_PORT_DISCONNECT_WAKE_EN(number));
			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number+1) |
				KL4_PORT_RESUME_WAKE_EN(number+1) |
				KL4_PORT_CONNECT_WAKE_EN(number+1) |
				KL4_PORT_DISCONNECT_WAKE_EN(number+1));
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(10);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(0) |
				KL3_IT_PORT_RESUME_WAKE_EN(0) |
				KL3_IT_PORT_CONNECT_WAKE_EN(0) |
				KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(1) |
				KL3_IT_PORT_RESUME_WAKE_EN(1) |
				KL3_IT_PORT_CONNECT_WAKE_EN(1) |
				KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(10);
		}
		if (macio->type == macio_intrepid) {
			/* wait for clock stopped bits to clear */
			u32 test0 = 0, test1 = 0;
			u32 status0, status1;
			int timeout = 1000;

			UNLOCK(flags);
			switch (number) {
			case 0:
				test0 = UNI_N_CLOCK_STOPPED_USB0;
				test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
				break;
			case 2:
				test0 = UNI_N_CLOCK_STOPPED_USB1;
				test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
				break;
			case 4:
				test0 = UNI_N_CLOCK_STOPPED_USB2;
				test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
				break;
			}
			do {
				if (--timeout <= 0) {
					printk(KERN_ERR
					       "core99_usb_enable: "
					       "Timeout waiting for clocks\n");
					break;
				}
				mdelay(1);
				status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
				status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
			} while ((status0 & test0) | (status1 & test1));
			LOCK(flags);
		}
	} else {
		/* Turn OFF */
		/* set the wakeup-enable bits for both ports of the cell */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg |=	KL4_PORT_WAKEUP_ENABLE(number) |
				KL4_PORT_RESUME_WAKE_EN(number) |
				KL4_PORT_CONNECT_WAKE_EN(number) |
				KL4_PORT_DISCONNECT_WAKE_EN(number);
			reg |=	KL4_PORT_WAKEUP_ENABLE(number+1) |
				KL4_PORT_RESUME_WAKE_EN(number+1) |
				KL4_PORT_CONNECT_WAKE_EN(number+1) |
				KL4_PORT_DISCONNECT_WAKE_EN(number+1);
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(1);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(0) |
				KL3_IT_PORT_RESUME_WAKE_EN(0) |
				KL3_IT_PORT_CONNECT_WAKE_EN(0) |
				KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(1) |
				KL3_IT_PORT_RESUME_WAKE_EN(1) |
				KL3_IT_PORT_CONNECT_WAKE_EN(1) |
				KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(1);
		}
		/* disable the cell (except on Intrepid) and suspend its pads */
		if (number == 0) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 |
				KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 2) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 |
				KL0_USB1_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 4) {
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 |
				KL1_USB2_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR1);
		}
		udelay(1);
	}
	UNLOCK(flags);

	return 0;
}

/* Gate the FireWire clock in the UniNorth clock control register. */
static long core99_firewire_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea
	    && macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);
	} else {
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}

/* Switch FireWire cable power via its GPIO (0 = on, 4 = off here). */
static long core99_firewire_cable_power(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	/* Trick: we allow NULL node */
	if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
		return -ENODEV;
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
		udelay(10);
	} else {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
		udelay(10);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}

/* Toggle the AACK delay bit on Intrepid-era UniNorth (rev >= 0xd2).
 * Note: the decision is taken from 'param', not 'value'.
 */
static long intrepid_aack_delay_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	if (uninorth_rev < 0xd2)
		return -ENODEV;

	LOCK(flags);
	if (param)
		UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	else
		UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	UNLOCK(flags);

	return 0;
}

#endif /* CONFIG_POWER4 */

/* Read one macio GPIO byte; 'param' is the register offset. */
static long core99_read_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	return MACIO_IN8(param);
}

/* Write the low byte of 'value' to macio GPIO at offset 'param'. */
static long core99_write_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	MACIO_OUT8(param, (u8)(value & 0xff));
	return 0;
}

#ifdef CONFIG_POWER4
/* Gate the K2 GMAC clock; the node is parked on k2_skiplist while its
 * clock is off (barriers order the list update against the FCR write).
 */
static long g5_gmac_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
		mb();
		k2_skiplist[0] = NULL;
	} else {
		k2_skiplist[0] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}

/* Gate the K2 FireWire clock; same skiplist scheme as g5_gmac_enable. */
static long g5_fw_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
		mb();
		k2_skiplist[1] = NULL;
	} else {
		k2_skiplist[1] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}

/* Release the MPIC from reset, but only for the one living under u3/u4. */
static long g5_mpic_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct device_node *parent = of_get_parent(node);
	int is_u3;

	if (parent == NULL)
		return 0;
	is_u3 = strcmp(parent->name, "u3") == 0 ||
		strcmp(parent->name, "u4") == 0;
	of_node_put(parent);
	if (!is_u3)
		return 0;

	LOCK(flags);
	UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
	UNLOCK(flags);

	return 0;
}

/* Pulse the BCM5221 ethernet PHY reset GPIO on the iMac G5 only. */
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	struct device_node *phy;
	int need_reset;

	/*
	 * We must not reset the combo PHYs, only the BCM5221 found in
	 * the iMac G5.
	 */
	phy = of_get_next_child(node, NULL);
	if (!phy)
		return -ENODEV;
	need_reset = of_device_is_compatible(phy, "B5221");
	of_node_put(phy);
	if (!need_reset)
		return 0;

	/* PHY reset is GPIO 29, not in device-tree unfortunately */
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
	/* Thankfully, this is now always called at a time when we can
	 * schedule by sungem.
	 */
	msleep(10);
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);

	return 0;
}

/* Enable/disable one I2S cell on K2/Shasta; the cell index comes from the
 * node name ("i2s-a" .. "i2s-c") and indexes the per-cell FCR bit table.
 */
static long g5_i2s_enable(struct device_node *node, long param, long value)
{
	/* Very crude implementation for now */
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;
	int cell;
	u32 fcrs[3][3] = {
		{ 0,
		  K2_FCR1_I2S0_CELL_ENABLE |
		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE,
		  KL3_I2S0_CLK18_ENABLE
		},
		{ KL0_SCC_A_INTF_ENABLE,
		  K2_FCR1_I2S1_CELL_ENABLE |
		  K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE,
		  KL3_I2S1_CLK18_ENABLE
		},
		{ KL0_SCC_B_INTF_ENABLE,
		  SH_FCR1_I2S2_CELL_ENABLE |
		  SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE,
		  SH_FCR3_I2S2_CLK18_ENABLE
		},
	};

	if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
		return -ENODEV;
	if (strncmp(node->name, "i2s-", 4))
		return -ENODEV;
	cell = node->name[4] - 'a';
	switch(cell) {
	case 0:
	case 1:
		break;
	case 2:
		if (macio->type == macio_shasta)
			break;
		/* cell 2 only exists on Shasta: fall through to -ENODEV */
	default:
		return -ENODEV;
	}

	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]);
		MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]);
		MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]);
	} else {
		MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]);
		MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]);
		MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]);
	}
	udelay(10);
	UNLOCK(flags);

	return 0;
}

#ifdef CONFIG_SMP
/* Pulse the soft-reset GPIO of CPU 'param' on K2/Shasta; unlike core99,
 * there is no default line table — missing DT info is an error.
 */
static long g5_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;
	struct device_node *cpus;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
		return -ENODEV;

	cpus = of_find_node_by_path("/cpus");
	if (cpus == NULL)
		return -ENODEV;
	for (np = cpus->child; np != NULL; np = np->sibling) {
		const u32 *num = of_get_property(np, "reg", NULL);
		const u32 *rst = of_get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	of_node_put(cpus);
	if (np == NULL || reset_io == 0)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP */

/*
 * This can be called from pmac_smp so isn't static
 *
 * This takes the second CPU off the bus on dual CPU machines
 * running UP
 */
void g5_phy_disable_cpu1(void)
{
	if (uninorth_maj == 3)
		UN_OUT(U3_API_PHY_CONFIG_1, 0);
}
#endif /* CONFIG_POWER4 */

#ifndef CONFIG_POWER4

#ifdef CONFIG_PM

/* GPIO and UniNorth clock state saved across a Core99 sleep cycle */
static u32 save_gpio_levels[2];
static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
static u32 save_unin_clock_ctl;

/* Turn off as many KeyLargo cells/clocks/PLLs as possible before sleep
 * (sleep_mode selects the deeper shutdown used for suspend vs doze).
 */
static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	if (sleep_mode) {
		mdelay(1);
		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
		(void)MACIO_IN32(KEYLARGO_FCR0);
		mdelay(1);
	}

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
				KL0_IRDA_CLK19_ENABLE);

	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
		KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
		KL1_UIDE_ENABLE);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	/* the PLL2X shutdown bit only exists on rev >= 2 chips */
	if (macio->rev >= 2) {
		temp |= KL3_SHUTDOWN_PLL2X;
		if (sleep_mode)
			temp |= KL3_SHUTDOWN_PLL_TOTAL;
	}

	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	if (sleep_mode)
		temp |= KL3_SHUTDOWN_PLLKW12;
	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE |
			  KL3_VIA_CLK16_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(1);
}

/* Pangea variant of the pre-sleep shutdown (also kills the USB cells). */
static void pangea_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_UIDE_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE |
		KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(1);
}

/* Intrepid variant of the pre-sleep shutdown. */
static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		  KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		  KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		  KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		  KL1_EIDE0_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(10);
}

/* Full Core99 sleep entry: quiesce devices, save chip state, shut the
 * macio down and put the UniNorth host bridge to sleep.
 */
static int core99_sleep(void)
{
	struct macio_chip *macio;
	int i;
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/* We power off the wireless slot in case it was not done
	 * by the driver. We don't power it on automatically however
	 */
	if (macio->flags & MACIO_FLAG_AIRPORT_ON)
		core99_airport_enable(macio->of_node, 0, 0);

	/* We power off the FW cable. Should be done by the driver... */
	if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
		core99_firewire_enable(NULL, 0, 0);
		core99_firewire_cable_power(NULL, 0, 0);
	}

	/* We make sure int. modem is off (in case driver lost it) */
	if (macio->type == macio_keylargo)
		core99_modem_enable(macio->of_node, 0, 0);
	else
		pangea_modem_enable(macio->of_node, 0, 0);

	/* We make sure the sound is off as well */
	core99_sound_chip_enable(macio->of_node, 0, 0);

	/*
	 * Save various bits of KeyLargo
	 */

	/* Save the state of the various GPIOs */
	save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
	save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);

	/* Save the FCRs */
	if (macio->type == macio_keylargo)
		save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
	save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
	save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
	save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
	save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
	save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
	if (macio->type == macio_pangea || macio->type == macio_intrepid)
		save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);

	/* Save state & config of DBDMA channels */
	dbdma_save(macio, save_dbdma);

	/*
	 * Turn off as much as we can
	 */
	if (macio->type == macio_pangea)
		pangea_shutdown(macio, 1);
	else if (macio->type == macio_intrepid)
		intrepid_shutdown(macio, 1);
	else if (macio->type == macio_keylargo)
		keylargo_shutdown(macio, 1);

	/*
	 * Put the host bridge to sleep
	 */

	save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
	/* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it
	 * enabled !
	 */
	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
	       ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
	udelay(100);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
	mdelay(10);

	/*
	 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
	 */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIS(0x506e0, 0x00400000);
		MACIO_BIS(0x506e0, 0x80000000);
	}
	return 0;
}

/* Core99 wake path: wake the host bridge, then restore everything that
 * core99_sleep() saved (MBCR, FCR0-5, DBDMA, GPIOs, UniNorth clocks).
 */
static int core99_wake_up(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/*
	 * Wakeup the host bridge
	 */
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
	udelay(10);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
	udelay(10);

	/*
	 * Restore KeyLargo
	 */

	if (macio->type == macio_keylargo) {
		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
		(void)MACIO_IN32(KEYLARGO_MBCR);
		udelay(10);
	}
	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
	(void)MACIO_IN32(KEYLARGO_FCR0);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
	(void)MACIO_IN32(KEYLARGO_FCR1);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
	(void)MACIO_IN32(KEYLARGO_FCR2);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
	(void)MACIO_IN32(KEYLARGO_FCR3);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
	(void)MACIO_IN32(KEYLARGO_FCR4);
	udelay(10);
	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
		(void)MACIO_IN32(KEYLARGO_FCR5);
		udelay(10);
	}

	dbdma_restore(macio, save_dbdma);

	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);

	/* FIXME more black
magic with OpenPIC ... */ if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) { MACIO_BIC(0x506e0, 0x00400000); MACIO_BIC(0x506e0, 0x80000000); } UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl); udelay(100); return 0; } #endif /* CONFIG_PM */ static long core99_sleep_state(struct device_node *node, long param, long value) { /* Param == 1 means to enter the "fake sleep" mode that is * used for CPU speed switch */ if (param == 1) { if (value == 1) { UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING); UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2); } else { UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL); udelay(10); UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING); udelay(10); } return 0; } if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) return -EPERM; #ifdef CONFIG_PM if (value == 1) return core99_sleep(); else if (value == 0) return core99_wake_up(); #endif /* CONFIG_PM */ return 0; } #endif /* CONFIG_POWER4 */ static long generic_dev_can_wake(struct device_node *node, long param, long value) { /* Todo: eventually check we are really dealing with on-board * video device ... */ if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP) pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP; return 0; } static long generic_get_mb_info(struct device_node *node, long param, long value) { switch(param) { case PMAC_MB_INFO_MODEL: return pmac_mb.model_id; case PMAC_MB_INFO_FLAGS: return pmac_mb.board_flags; case PMAC_MB_INFO_NAME: /* hack hack hack... but should work */ *((const char **)value) = pmac_mb.model_name; return 0; } return -EINVAL; } /* * Table definitions */ /* Used on any machine */ static struct feature_table_entry any_features[] = { { PMAC_FTR_GET_MB_INFO, generic_get_mb_info }, { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake }, { 0, NULL } }; #ifndef CONFIG_POWER4 /* OHare based motherboards. Currently, we only use these on the * 2400,3400 and 3500 series powerbooks. 
Some older desktops seem * to have issues with turning on/off those asic cells */ static struct feature_table_entry ohare_features[] = { { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable }, { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable }, { PMAC_FTR_IDE_ENABLE, ohare_ide_enable}, { PMAC_FTR_IDE_RESET, ohare_ide_reset}, { PMAC_FTR_SLEEP_STATE, ohare_sleep_state }, { 0, NULL } }; /* Heathrow desktop machines (Beige G3). * Separated as some features couldn't be properly tested * and the serial port control bits appear to confuse it. */ static struct feature_table_entry heathrow_desktop_features[] = { { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, { PMAC_FTR_IDE_RESET, heathrow_ide_reset }, { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable }, { 0, NULL } }; /* Heathrow based laptop, that is the Wallstreet and mainstreet * powerbooks. */ static struct feature_table_entry heathrow_laptop_features[] = { { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, { PMAC_FTR_IDE_RESET, heathrow_ide_reset }, { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable }, { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable }, { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state }, { 0, NULL } }; /* Paddington based machines * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4. 
*/ static struct feature_table_entry paddington_features[] = { { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, { PMAC_FTR_IDE_RESET, heathrow_ide_reset }, { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable }, { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable }, { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state }, { 0, NULL } }; /* Core99 & MacRISC 2 machines (all machines released since the * iBook (included), that is all AGP machines, except pangea * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo * used on iBook2 & iMac "flow power". */ static struct feature_table_entry core99_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, core99_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, #ifdef CONFIG_PM { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, #endif #ifdef CONFIG_SMP { PMAC_FTR_RESET_CPU, core99_reset_cpu }, #endif /* CONFIG_SMP */ { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* RackMac */ static struct feature_table_entry rackmac_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { 
PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, #ifdef CONFIG_SMP { PMAC_FTR_RESET_CPU, core99_reset_cpu }, #endif /* CONFIG_SMP */ { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* Pangea features */ static struct feature_table_entry pangea_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* Intrepid features */ static struct feature_table_entry intrepid_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { PMAC_FTR_AACK_DELAY_ENABLE, intrepid_aack_delay_enable }, { 0, NULL } }; #else /* CONFIG_POWER4 */ /* G5 features */ static struct feature_table_entry g5_features[] = { { 
PMAC_FTR_GMAC_ENABLE, g5_gmac_enable }, { PMAC_FTR_1394_ENABLE, g5_fw_enable }, { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable }, { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable }, #ifdef CONFIG_SMP { PMAC_FTR_RESET_CPU, g5_reset_cpu }, #endif /* CONFIG_SMP */ { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; #endif /* CONFIG_POWER4 */ static struct pmac_mb_def pmac_mb_defs[] = { #ifndef CONFIG_POWER4 /* * Desktops */ { "AAPL,8500", "PowerMac 8500/8600", PMAC_TYPE_PSURGE, NULL, 0 }, { "AAPL,9500", "PowerMac 9500/9600", PMAC_TYPE_PSURGE, NULL, 0 }, { "AAPL,7200", "PowerMac 7200", PMAC_TYPE_PSURGE, NULL, 0 }, { "AAPL,7300", "PowerMac 7200/7300", PMAC_TYPE_PSURGE, NULL, 0 }, { "AAPL,7500", "PowerMac 7500", PMAC_TYPE_PSURGE, NULL, 0 }, { "AAPL,ShinerESB", "Apple Network Server", PMAC_TYPE_ANS, NULL, 0 }, { "AAPL,e407", "Alchemy", PMAC_TYPE_ALCHEMY, NULL, 0 }, { "AAPL,e411", "Gazelle", PMAC_TYPE_GAZELLE, NULL, 0 }, { "AAPL,Gossamer", "PowerMac G3 (Gossamer)", PMAC_TYPE_GOSSAMER, heathrow_desktop_features, 0 }, { "AAPL,PowerMac G3", "PowerMac G3 (Silk)", PMAC_TYPE_SILK, heathrow_desktop_features, 0 }, { "PowerMac1,1", "Blue&White G3", PMAC_TYPE_YOSEMITE, paddington_features, 0 }, { "PowerMac1,2", "PowerMac G4 PCI Graphics", PMAC_TYPE_YIKES, paddington_features, 0 }, { "PowerMac2,1", "iMac FireWire", PMAC_TYPE_FW_IMAC, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 }, { "PowerMac2,2", "iMac FireWire", PMAC_TYPE_FW_IMAC, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 }, { "PowerMac3,1", "PowerMac G4 AGP Graphics", PMAC_TYPE_SAWTOOTH, core99_features, PMAC_MB_OLD_CORE99 }, { "PowerMac3,2", "PowerMac G4 AGP Graphics", PMAC_TYPE_SAWTOOTH, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 }, { "PowerMac3,3", "PowerMac G4 AGP Graphics", PMAC_TYPE_SAWTOOTH, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 }, { "PowerMac3,4", "PowerMac G4 Silver", 
PMAC_TYPE_QUICKSILVER, core99_features, PMAC_MB_MAY_SLEEP }, { "PowerMac3,5", "PowerMac G4 Silver", PMAC_TYPE_QUICKSILVER, core99_features, PMAC_MB_MAY_SLEEP }, { "PowerMac3,6", "PowerMac G4 Windtunnel", PMAC_TYPE_WINDTUNNEL, core99_features, PMAC_MB_MAY_SLEEP, }, { "PowerMac4,1", "iMac \"Flower Power\"", PMAC_TYPE_PANGEA_IMAC, pangea_features, PMAC_MB_MAY_SLEEP }, { "PowerMac4,2", "Flat panel iMac", PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features, PMAC_MB_CAN_SLEEP }, { "PowerMac4,4", "eMac", PMAC_TYPE_EMAC, core99_features, PMAC_MB_MAY_SLEEP }, { "PowerMac5,1", "PowerMac G4 Cube", PMAC_TYPE_CUBE, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 }, { "PowerMac6,1", "Flat panel iMac", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP, }, { "PowerMac6,3", "Flat panel iMac", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP, }, { "PowerMac6,4", "eMac", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP, }, { "PowerMac10,1", "Mac mini", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP, }, { "iMac,1", "iMac (first generation)", PMAC_TYPE_ORIG_IMAC, paddington_features, 0 }, /* * Xserve's */ { "RackMac1,1", "XServe", PMAC_TYPE_RACKMAC, rackmac_features, 0, }, { "RackMac1,2", "XServe rev. 
2", PMAC_TYPE_RACKMAC, rackmac_features, 0, }, /* * Laptops */ { "AAPL,3400/2400", "PowerBook 3400", PMAC_TYPE_HOOPER, ohare_features, PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE }, { "AAPL,3500", "PowerBook 3500", PMAC_TYPE_KANGA, ohare_features, PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE }, { "AAPL,PowerBook1998", "PowerBook Wallstreet", PMAC_TYPE_WALLSTREET, heathrow_laptop_features, PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE }, { "PowerBook1,1", "PowerBook 101 (Lombard)", PMAC_TYPE_101_PBOOK, paddington_features, PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE }, { "PowerBook2,1", "iBook (first generation)", PMAC_TYPE_ORIG_IBOOK, core99_features, PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE }, { "PowerBook2,2", "iBook FireWire", PMAC_TYPE_FW_IBOOK, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE }, { "PowerBook3,1", "PowerBook Pismo", PMAC_TYPE_PISMO, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE }, { "PowerBook3,2", "PowerBook Titanium", PMAC_TYPE_TITANIUM, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook3,3", "PowerBook Titanium II", PMAC_TYPE_TITANIUM2, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook3,4", "PowerBook Titanium III", PMAC_TYPE_TITANIUM3, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook3,5", "PowerBook Titanium IV", PMAC_TYPE_TITANIUM4, core99_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook4,1", "iBook 2", PMAC_TYPE_IBOOK2, pangea_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook4,2", "iBook 2", PMAC_TYPE_IBOOK2, pangea_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook4,3", "iBook 2 rev. 
2", PMAC_TYPE_IBOOK2, pangea_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE }, { "PowerBook5,1", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,2", "PowerBook G4 15\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,3", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,4", "PowerBook G4 15\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,5", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,6", "PowerBook G4 15\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,7", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook5,8", "PowerBook G4 15\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, }, { "PowerBook5,9", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, }, { "PowerBook6,1", "PowerBook G4 12\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,2", "PowerBook G4", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,3", "iBook G4", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,4", "PowerBook G4 12\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,5", "iBook G4", 
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,7", "iBook G4", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, { "PowerBook6,8", "PowerBook G4 12\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, #else /* CONFIG_POWER4 */ { "PowerMac7,2", "PowerMac G5", PMAC_TYPE_POWERMAC_G5, g5_features, 0, }, #ifdef CONFIG_PPC64 { "PowerMac7,3", "PowerMac G5", PMAC_TYPE_POWERMAC_G5, g5_features, 0, }, { "PowerMac8,1", "iMac G5", PMAC_TYPE_IMAC_G5, g5_features, 0, }, { "PowerMac9,1", "PowerMac G5", PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, }, { "PowerMac11,2", "PowerMac G5 Dual Core", PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, }, { "PowerMac12,1", "iMac G5 (iSight)", PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, }, { "RackMac3,1", "XServe G5", PMAC_TYPE_XSERVE_G5, g5_features, 0, }, #endif /* CONFIG_PPC64 */ #endif /* CONFIG_POWER4 */ }; /* * The toplevel feature_call callback */ long pmac_do_feature_call(unsigned int selector, ...) { struct device_node *node; long param, value; int i; feature_call func = NULL; va_list args; if (pmac_mb.features) for (i=0; pmac_mb.features[i].function; i++) if (pmac_mb.features[i].selector == selector) { func = pmac_mb.features[i].function; break; } if (!func) for (i=0; any_features[i].function; i++) if (any_features[i].selector == selector) { func = any_features[i].function; break; } if (!func) return -ENODEV; va_start(args, selector); node = (struct device_node*)va_arg(args, void*); param = va_arg(args, long); value = va_arg(args, long); va_end(args); return func(node, param, value); } static int __init probe_motherboard(void) { int i; struct macio_chip *macio = &macio_chips[0]; const char *model = NULL; struct device_node *dt; int ret = 0; /* Lookup known motherboard type in device-tree. 
First try an * exact match on the "model" property, then try a "compatible" * match is none is found. */ dt = of_find_node_by_name(NULL, "device-tree"); if (dt != NULL) model = of_get_property(dt, "model", NULL); for(i=0; model && i<ARRAY_SIZE(pmac_mb_defs); i++) { if (strcmp(model, pmac_mb_defs[i].model_string) == 0) { pmac_mb = pmac_mb_defs[i]; goto found; } } for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) { if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) { pmac_mb = pmac_mb_defs[i]; goto found; } } /* Fallback to selection depending on mac-io chip type */ switch(macio->type) { #ifndef CONFIG_POWER4 case macio_grand_central: pmac_mb.model_id = PMAC_TYPE_PSURGE; pmac_mb.model_name = "Unknown PowerSurge"; break; case macio_ohare: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE; pmac_mb.model_name = "Unknown OHare-based"; break; case macio_heathrow: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW; pmac_mb.model_name = "Unknown Heathrow-based"; pmac_mb.features = heathrow_desktop_features; break; case macio_paddington: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON; pmac_mb.model_name = "Unknown Paddington-based"; pmac_mb.features = paddington_features; break; case macio_keylargo: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99; pmac_mb.model_name = "Unknown Keylargo-based"; pmac_mb.features = core99_features; break; case macio_pangea: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA; pmac_mb.model_name = "Unknown Pangea-based"; pmac_mb.features = pangea_features; break; case macio_intrepid: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID; pmac_mb.model_name = "Unknown Intrepid-based"; pmac_mb.features = intrepid_features; break; #else /* CONFIG_POWER4 */ case macio_keylargo2: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2; pmac_mb.model_name = "Unknown K2-based"; pmac_mb.features = g5_features; break; case macio_shasta: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA; pmac_mb.model_name = "Unknown Shasta-based"; pmac_mb.features = g5_features; break; #endif /* CONFIG_POWER4 */ default: 
ret = -ENODEV; goto done; } found: #ifndef CONFIG_POWER4 /* Fixup Hooper vs. Comet */ if (pmac_mb.model_id == PMAC_TYPE_HOOPER) { u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4); if (!mach_id_ptr) { ret = -ENODEV; goto done; } /* Here, I used to disable the media-bay on comet. It * appears this is wrong, the floppy connector is actually * a kind of media-bay and works with the current driver. */ if (__raw_readl(mach_id_ptr) & 0x20000000UL) pmac_mb.model_id = PMAC_TYPE_COMET; iounmap(mach_id_ptr); } /* Set default value of powersave_nap on machines that support it. * It appears that uninorth rev 3 has a problem with it, we don't * enable it on those. In theory, the flush-on-lock property is * supposed to be set when not supported, but I'm not very confident * that all Apple OF revs did it properly, I do it the paranoid way. */ while (uninorth_base && uninorth_rev > 3) { struct device_node *cpus = of_find_node_by_path("/cpus"); struct device_node *np; if (!cpus || !cpus->child) { printk(KERN_WARNING "Can't find CPU(s) in device tree !\n"); of_node_put(cpus); break; } np = cpus->child; /* Nap mode not supported on SMP */ if (np->sibling) { of_node_put(cpus); break; } /* Nap mode not supported if flush-on-lock property is present */ if (of_get_property(np, "flush-on-lock", NULL)) { of_node_put(cpus); break; } of_node_put(cpus); powersave_nap = 1; printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n"); break; } /* On CPUs that support it (750FX), lowspeed by default during * NAP mode */ powersave_lowspeed = 1; #else /* CONFIG_POWER4 */ powersave_nap = 1; #endif /* CONFIG_POWER4 */ /* Check for "mobile" machine */ if (model && (strncmp(model, "PowerBook", 9) == 0 || strncmp(model, "iBook", 5) == 0)) pmac_mb.board_flags |= PMAC_MB_MOBILE; printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name); done: of_node_put(dt); return ret; } /* Initialize the Core99 UniNorth host bridge and memory controller */ static void __init probe_uninorth(void) { const u32 
*addrp; phys_addr_t address; unsigned long actrl; /* Locate core99 Uni-N */ uninorth_node = of_find_node_by_name(NULL, "uni-n"); uninorth_maj = 1; /* Locate G5 u3 */ if (uninorth_node == NULL) { uninorth_node = of_find_node_by_name(NULL, "u3"); uninorth_maj = 3; } /* Locate G5 u4 */ if (uninorth_node == NULL) { uninorth_node = of_find_node_by_name(NULL, "u4"); uninorth_maj = 4; } if (uninorth_node == NULL) { uninorth_maj = 0; return; } addrp = of_get_property(uninorth_node, "reg", NULL); if (addrp == NULL) return; address = of_translate_address(uninorth_node, addrp); if (address == 0) return; uninorth_base = ioremap(address, 0x40000); if (uninorth_base == NULL) return; uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); if (uninorth_maj == 3 || uninorth_maj == 4) { u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000); if (u3_ht_base == NULL) { iounmap(uninorth_base); return; } } printk(KERN_INFO "Found %s memory controller & host bridge" " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" : uninorth_maj == 4 ? "U4" : "UniNorth", (unsigned int)address, uninorth_rev); printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); /* Set the arbitrer QAck delay according to what Apple does */ if (uninorth_rev < 0x11) { actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK; actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 : UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT; UN_OUT(UNI_N_ARB_CTRL, actrl); } /* Some more magic as done by them in recent MacOS X on UniNorth * revs 1.5 to 2.O and Pangea. 
Seem to toggle the UniN Maxbus/PCI * memory timeout */ if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0) UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff); } static void __init probe_one_macio(const char *name, const char *compat, int type) { struct device_node* node; int i; volatile u32 __iomem *base; const u32 *addrp, *revp; phys_addr_t addr; u64 size; for (node = NULL; (node = of_find_node_by_name(node, name)) != NULL;) { if (!compat) break; if (of_device_is_compatible(node, compat)) break; } if (!node) return; for(i=0; i<MAX_MACIO_CHIPS; i++) { if (!macio_chips[i].of_node) break; if (macio_chips[i].of_node == node) return; } if (i >= MAX_MACIO_CHIPS) { printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); return; } addrp = of_get_pci_address(node, 0, &size, NULL); if (addrp == NULL) { printk(KERN_ERR "pmac_feature: %s: can't find base !\n", node->full_name); return; } addr = of_translate_address(node, addrp); if (addr == 0) { printk(KERN_ERR "pmac_feature: %s, can't translate base !\n", node->full_name); return; } base = ioremap(addr, (unsigned long)size); if (!base) { printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n", node->full_name); return; } if (type == macio_keylargo || type == macio_keylargo2) { const u32 *did = of_get_property(node, "device-id", NULL); if (*did == 0x00000025) type = macio_pangea; if (*did == 0x0000003e) type = macio_intrepid; if (*did == 0x0000004f) type = macio_shasta; } macio_chips[i].of_node = node; macio_chips[i].type = type; macio_chips[i].base = base; macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON; macio_chips[i].name = macio_names[type]; revp = of_get_property(node, "revision-id", NULL); if (revp) macio_chips[i].rev = *revp; printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", macio_names[type], macio_chips[i].rev, macio_chips[i].base); } static int __init probe_macios(void) { /* 
Warning, ordering is important */ probe_one_macio("gc", NULL, macio_grand_central); probe_one_macio("ohare", NULL, macio_ohare); probe_one_macio("pci106b,7", NULL, macio_ohareII); probe_one_macio("mac-io", "keylargo", macio_keylargo); probe_one_macio("mac-io", "paddington", macio_paddington); probe_one_macio("mac-io", "gatwick", macio_gatwick); probe_one_macio("mac-io", "heathrow", macio_heathrow); probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2); /* Make sure the "main" macio chip appear first */ if (macio_chips[0].type == macio_gatwick && macio_chips[1].type == macio_heathrow) { struct macio_chip temp = macio_chips[0]; macio_chips[0] = macio_chips[1]; macio_chips[1] = temp; } if (macio_chips[0].type == macio_ohareII && macio_chips[1].type == macio_ohare) { struct macio_chip temp = macio_chips[0]; macio_chips[0] = macio_chips[1]; macio_chips[1] = temp; } macio_chips[0].lbus.index = 0; macio_chips[1].lbus.index = 1; return (macio_chips[0].of_node == NULL) ? -ENODEV : 0; } static void __init initial_serial_shutdown(struct device_node *np) { int len; const struct slot_names_prop { int count; char name[1]; } *slots; const char *conn; int port_type = PMAC_SCC_ASYNC; int modem = 0; slots = of_get_property(np, "slot-names", &len); conn = of_get_property(np, "AAPL,connector", &len); if (conn && (strcmp(conn, "infrared") == 0)) port_type = PMAC_SCC_IRDA; else if (of_device_is_compatible(np, "cobalt")) modem = 1; else if (slots && slots->count > 0) { if (strcmp(slots->name, "IrDA") == 0) port_type = PMAC_SCC_IRDA; else if (strcmp(slots->name, "Modem") == 0) modem = 1; } if (modem) pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0); pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0); } static void __init set_initial_features(void) { struct device_node *np; /* That hack appears to be necessary for some StarMax motherboards * but I'm not too sure it was audited for side-effects on other * ohare based machines... 
* Since I still have difficulties figuring the right way to * differenciate them all and since that hack was there for a long * time, I'll keep it around */ if (macio_chips[0].type == macio_ohare) { struct macio_chip *macio = &macio_chips[0]; np = of_find_node_by_name(NULL, "via-pmu"); if (np) MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE); else MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES); of_node_put(np); } else if (macio_chips[1].type == macio_ohare) { struct macio_chip *macio = &macio_chips[1]; MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE); } #ifdef CONFIG_POWER4 if (macio_chips[0].type == macio_keylargo2 || macio_chips[0].type == macio_shasta) { #ifndef CONFIG_SMP /* On SMP machines running UP, we have the second CPU eating * bus cycles. We need to take it off the bus. This is done * from pmac_smp for SMP kernels running on one CPU */ np = of_find_node_by_type(NULL, "cpu"); if (np != NULL) np = of_find_node_by_type(np, "cpu"); if (np != NULL) { g5_phy_disable_cpu1(); of_node_put(np); } #endif /* CONFIG_SMP */ /* Enable GMAC for now for PCI probing. It will be disabled * later on after PCI probe */ np = of_find_node_by_name(NULL, "ethernet"); while(np) { if (of_device_is_compatible(np, "K2-GMAC")) g5_gmac_enable(np, 0, 1); np = of_find_node_by_name(np, "ethernet"); } /* Enable FW before PCI probe. Will be disabled later on * Note: We should have a batter way to check that we are * dealing with uninorth internal cell and not a PCI cell * on the external PCI. The code below works though. */ np = of_find_node_by_name(NULL, "firewire"); while(np) { if (of_device_is_compatible(np, "pci106b,5811")) { macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED; g5_fw_enable(np, 0, 1); } np = of_find_node_by_name(np, "firewire"); } } #else /* CONFIG_POWER4 */ if (macio_chips[0].type == macio_keylargo || macio_chips[0].type == macio_pangea || macio_chips[0].type == macio_intrepid) { /* Enable GMAC for now for PCI probing. 
It will be disabled * later on after PCI probe */ np = of_find_node_by_name(NULL, "ethernet"); while(np) { if (np->parent && of_device_is_compatible(np->parent, "uni-north") && of_device_is_compatible(np, "gmac")) core99_gmac_enable(np, 0, 1); np = of_find_node_by_name(np, "ethernet"); } /* Enable FW before PCI probe. Will be disabled later on * Note: We should have a batter way to check that we are * dealing with uninorth internal cell and not a PCI cell * on the external PCI. The code below works though. */ np = of_find_node_by_name(NULL, "firewire"); while(np) { if (np->parent && of_device_is_compatible(np->parent, "uni-north") && (of_device_is_compatible(np, "pci106b,18") || of_device_is_compatible(np, "pci106b,30") || of_device_is_compatible(np, "pci11c1,5811"))) { macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED; core99_firewire_enable(np, 0, 1); } np = of_find_node_by_name(np, "firewire"); } /* Enable ATA-100 before PCI probe. */ np = of_find_node_by_name(NULL, "ata-6"); while(np) { if (np->parent && of_device_is_compatible(np->parent, "uni-north") && of_device_is_compatible(np, "kauai-ata")) { core99_ata100_enable(np, 1); } np = of_find_node_by_name(np, "ata-6"); } /* Switch airport off */ for_each_node_by_name(np, "radio") { if (np && np->parent == macio_chips[0].of_node) { macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON; core99_airport_enable(np, 0, 0); } } of_node_put(np); } /* On all machines that support sound PM, switch sound off */ if (macio_chips[0].of_node) pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE, macio_chips[0].of_node, 0, 0); /* While on some desktop G3s, we turn it back on */ if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER || pmac_mb.model_id == PMAC_TYPE_SILK)) { struct macio_chip *macio = &macio_chips[0]; MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE); MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); } #endif /* CONFIG_POWER4 */ /* On all machines, switch modem & serial ports off */ 
for_each_node_by_name(np, "ch-a") initial_serial_shutdown(np); of_node_put(np); for_each_node_by_name(np, "ch-b") initial_serial_shutdown(np); of_node_put(np); } void __init pmac_feature_init(void) { /* Detect the UniNorth memory controller */ probe_uninorth(); /* Probe mac-io controllers */ if (probe_macios()) { printk(KERN_WARNING "No mac-io chip found\n"); return; } /* Probe machine type */ if (probe_motherboard()) printk(KERN_WARNING "Unknown PowerMac !\n"); /* Set some initial features (turn off some chips that will * be later turned on) */ set_initial_features(); } #if 0 static void dump_HT_speeds(char *name, u32 cfg, u32 frq) { int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 }; int bits[8] = { 8,16,0,32,2,4,0,0 }; int freq = (frq >> 8) & 0xf; if (freqs[freq] == 0) printk("%s: Unknown HT link frequency %x\n", name, freq); else printk("%s: %d MHz on main link, (%d in / %d out) bits width\n", name, freqs[freq], bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]); } void __init pmac_check_ht_link(void) { u32 ufreq, freq, ucfg, cfg; struct device_node *pcix_node; u8 px_bus, px_devfn; struct pci_controller *px_hose; (void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND); ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG); ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ); dump_HT_speeds("U3 HyperTransport", cfg, freq); pcix_node = of_find_compatible_node(NULL, "pci", "pci-x"); if (pcix_node == NULL) { printk("No PCI-X bridge found\n"); return; } if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) { printk("PCI-X bridge found but not matched to pci\n"); return; } px_hose = pci_find_hose_for_OF_device(pcix_node); if (px_hose == NULL) { printk("PCI-X bridge found but not matched to host\n"); return; } early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg); early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq); dump_HT_speeds("PCI-X HT Uplink", cfg, freq); early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg); 
early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq); dump_HT_speeds("PCI-X HT Downlink", cfg, freq); } #endif /* 0 */ /* * Early video resume hook */ static void (*pmac_early_vresume_proc)(void *data); static void *pmac_early_vresume_data; void pmac_set_early_video_resume(void (*proc)(void *data), void *data) { if (!machine_is(powermac)) return; preempt_disable(); pmac_early_vresume_proc = proc; pmac_early_vresume_data = data; preempt_enable(); } EXPORT_SYMBOL(pmac_set_early_video_resume); void pmac_call_early_video_resume(void) { if (pmac_early_vresume_proc) pmac_early_vresume_proc(pmac_early_vresume_data); } /* * AGP related suspend/resume code */ static struct pci_dev *pmac_agp_bridge; static int (*pmac_agp_suspend)(struct pci_dev *bridge); static int (*pmac_agp_resume)(struct pci_dev *bridge); void pmac_register_agp_pm(struct pci_dev *bridge, int (*suspend)(struct pci_dev *bridge), int (*resume)(struct pci_dev *bridge)) { if (suspend || resume) { pmac_agp_bridge = bridge; pmac_agp_suspend = suspend; pmac_agp_resume = resume; return; } if (bridge != pmac_agp_bridge) return; pmac_agp_suspend = pmac_agp_resume = NULL; return; } EXPORT_SYMBOL(pmac_register_agp_pm); void pmac_suspend_agp_for_card(struct pci_dev *dev) { if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL) return; if (pmac_agp_bridge->bus != dev->bus) return; pmac_agp_suspend(pmac_agp_bridge); } EXPORT_SYMBOL(pmac_suspend_agp_for_card); void pmac_resume_agp_for_card(struct pci_dev *dev) { if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL) return; if (pmac_agp_bridge->bus != dev->bus) return; pmac_agp_resume(pmac_agp_bridge); } EXPORT_SYMBOL(pmac_resume_agp_for_card); int pmac_get_uninorth_variant(void) { return uninorth_maj; }
gpl-2.0
CyanogenMod/android_kernel_oppo_n1
sound/soc/omap/omap-abe-twl6040.c
1542
9385
/* * omap-abe-twl6040.c -- SoC audio for TI OMAP based boards with ABE and * twl6040 codec * * Author: Misael Lopez Cruz <misael.lopez@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/mfd/twl6040.h> #include <linux/platform_data/omap-abe-twl6040.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/jack.h> #include <asm/mach-types.h> #include <plat/hardware.h> #include <plat/mux.h> #include "omap-dmic.h" #include "omap-mcpdm.h" #include "omap-pcm.h" #include "../codecs/twl6040.h" static int omap_abe_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_codec *codec = rtd->codec; struct snd_soc_card *card = codec->card; struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev); int clk_id, freq; int ret; clk_id = twl6040_get_clk_id(rtd->codec); if (clk_id == TWL6040_SYSCLK_SEL_HPPLL) freq = pdata->mclk_freq; else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL) freq = 32768; else return -EINVAL; /* set the codec mclk */ ret = snd_soc_dai_set_sysclk(codec_dai, clk_id, freq, SND_SOC_CLOCK_IN); if (ret) { printk(KERN_ERR "can't set codec system clock\n"); return ret; } return ret; } static struct 
snd_soc_ops omap_abe_ops = { .hw_params = omap_abe_hw_params, }; static int omap_abe_dmic_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_DMIC_SYSCLK_PAD_CLKS, 19200000, SND_SOC_CLOCK_IN); if (ret < 0) { printk(KERN_ERR "can't set DMIC cpu system clock\n"); return ret; } ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_DMIC_ABE_DMIC_CLK, 2400000, SND_SOC_CLOCK_OUT); if (ret < 0) { printk(KERN_ERR "can't set DMIC output clock\n"); return ret; } return 0; } static struct snd_soc_ops omap_abe_dmic_ops = { .hw_params = omap_abe_dmic_hw_params, }; /* Headset jack */ static struct snd_soc_jack hs_jack; /*Headset jack detection DAPM pins */ static struct snd_soc_jack_pin hs_jack_pins[] = { { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, { .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE, }, }; /* SDP4430 machine DAPM */ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = { /* Outputs */ SND_SOC_DAPM_HP("Headset Stereophone", NULL), SND_SOC_DAPM_SPK("Earphone Spk", NULL), SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_SPK("Vibrator", NULL), /* Inputs */ SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Main Handset Mic", NULL), SND_SOC_DAPM_MIC("Sub Handset Mic", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static const struct snd_soc_dapm_route audio_map[] = { /* Routings for outputs */ {"Headset Stereophone", NULL, "HSOL"}, {"Headset Stereophone", NULL, "HSOR"}, {"Earphone Spk", NULL, "EP"}, {"Ext Spk", NULL, "HFL"}, {"Ext Spk", NULL, "HFR"}, {"Line Out", NULL, "AUXL"}, {"Line Out", NULL, "AUXR"}, {"Vibrator", NULL, "VIBRAL"}, {"Vibrator", NULL, "VIBRAR"}, /* Routings for inputs */ {"HSMIC", NULL, "Headset Mic"}, {"Headset Mic", NULL, "Headset Mic Bias"}, {"MAINMIC", NULL, "Main Handset Mic"}, {"Main Handset 
Mic", NULL, "Main Mic Bias"}, {"SUBMIC", NULL, "Sub Handset Mic"}, {"Sub Handset Mic", NULL, "Main Mic Bias"}, {"AFML", NULL, "Line In"}, {"AFMR", NULL, "Line In"}, }; static inline void twl6040_disconnect_pin(struct snd_soc_dapm_context *dapm, int connected, char *pin) { if (!connected) snd_soc_dapm_disable_pin(dapm, pin); } static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_card *card = codec->card; struct snd_soc_dapm_context *dapm = &codec->dapm; struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev); int hs_trim; int ret = 0; /* Disable not connected paths if not used */ twl6040_disconnect_pin(dapm, pdata->has_hs, "Headset Stereophone"); twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk"); twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk"); twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out"); twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator"); twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic"); twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic"); twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic"); twl6040_disconnect_pin(dapm, pdata->has_afm, "Line In"); /* * Configure McPDM offset cancellation based on the HSOTRIM value from * twl6040. 
*/ hs_trim = twl6040_get_trim_value(codec, TWL6040_TRIM_HSOTRIM); omap_mcpdm_configure_dn_offsets(rtd, TWL6040_HSF_TRIM_LEFT(hs_trim), TWL6040_HSF_TRIM_RIGHT(hs_trim)); /* Headset jack detection only if it is supported */ if (pdata->jack_detection) { ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET, &hs_jack); if (ret) return ret; ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins), hs_jack_pins); twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET); } return ret; } static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = { SND_SOC_DAPM_MIC("Digital Mic", NULL), }; static const struct snd_soc_dapm_route dmic_audio_map[] = { {"DMic", NULL, "Digital Mic"}, {"Digital Mic", NULL, "Digital Mic1 Bias"}, }; static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets, ARRAY_SIZE(dmic_dapm_widgets)); if (ret) return ret; return snd_soc_dapm_add_routes(dapm, dmic_audio_map, ARRAY_SIZE(dmic_audio_map)); } /* Digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link twl6040_dmic_dai[] = { { .name = "TWL6040", .stream_name = "TWL6040", .cpu_dai_name = "omap-mcpdm", .codec_dai_name = "twl6040-legacy", .platform_name = "omap-pcm-audio", .codec_name = "twl6040-codec", .init = omap_abe_twl6040_init, .ops = &omap_abe_ops, }, { .name = "DMIC", .stream_name = "DMIC Capture", .cpu_dai_name = "omap-dmic", .codec_dai_name = "dmic-hifi", .platform_name = "omap-pcm-audio", .codec_name = "dmic-codec", .init = omap_abe_dmic_init, .ops = &omap_abe_dmic_ops, }, }; static struct snd_soc_dai_link twl6040_only_dai[] = { { .name = "TWL6040", .stream_name = "TWL6040", .cpu_dai_name = "omap-mcpdm", .codec_dai_name = "twl6040-legacy", .platform_name = "omap-pcm-audio", .codec_name = "twl6040-codec", .init = omap_abe_twl6040_init, .ops = &omap_abe_ops, }, }; /* Audio machine 
driver */ static struct snd_soc_card omap_abe_card = { .owner = THIS_MODULE, .dapm_widgets = twl6040_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets), .dapm_routes = audio_map, .num_dapm_routes = ARRAY_SIZE(audio_map), }; static __devinit int omap_abe_probe(struct platform_device *pdev) { struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev); struct snd_soc_card *card = &omap_abe_card; int ret; card->dev = &pdev->dev; if (!pdata) { dev_err(&pdev->dev, "Missing pdata\n"); return -ENODEV; } if (pdata->card_name) { card->name = pdata->card_name; } else { dev_err(&pdev->dev, "Card name is not provided\n"); return -ENODEV; } if (!pdata->mclk_freq) { dev_err(&pdev->dev, "MCLK frequency missing\n"); return -ENODEV; } if (pdata->has_dmic) { card->dai_link = twl6040_dmic_dai; card->num_links = ARRAY_SIZE(twl6040_dmic_dai); } else { card->dai_link = twl6040_only_dai; card->num_links = ARRAY_SIZE(twl6040_only_dai); } ret = snd_soc_register_card(card); if (ret) dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); return ret; } static int __devexit omap_abe_remove(struct platform_device *pdev) { struct snd_soc_card *card = platform_get_drvdata(pdev); snd_soc_unregister_card(card); return 0; } static struct platform_driver omap_abe_driver = { .driver = { .name = "omap-abe-twl6040", .owner = THIS_MODULE, .pm = &snd_soc_pm_ops, }, .probe = omap_abe_probe, .remove = __devexit_p(omap_abe_remove), }; module_platform_driver(omap_abe_driver); MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>"); MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:omap-abe-twl6040");
gpl-2.0
GHackAnonymous/linux
drivers/ata/pata_triflex.c
2054
6589
/* * pata_triflex.c - Compaq PATA for new ATA layer * (C) 2005 Red Hat Inc * Alan Cox <alan@lxorguk.ukuu.org.uk> * * based upon * * triflex.c * * IDE Chipset driver for the Compaq TriFlex IDE controller. * * Known to work with the Compaq Workstation 5x00 series. * * Copyright (C) 2002 Hewlett-Packard Development Group, L.P. * Author: Torben Mathiasen <torben.mathiasen@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Loosely based on the piix & svwks drivers. * * Documentation: * Not publicly available. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_triflex" #define DRV_VERSION "0.2.8" /** * triflex_prereset - probe begin * @link: ATA link * @deadline: deadline jiffies for the operation * * Set up cable type and use generic probe init */ static int triflex_prereset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits triflex_enable_bits[] = { { 0x80, 1, 0x01, 0x01 }, { 0x80, 1, 0x02, 0x02 } }; struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * triflex_load_timing - timing configuration * @ap: ATA interface * @adev: Device on the bus * @speed: speed to configure * * The Triflex has one set of timings per device per channel. This * means we must do some switching. As the PIO and DMA timings don't * match we have to do some reloading unlike PIIX devices where tuning * tricks can avoid it. */ static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 timing = 0; u32 triflex_timing, old_triflex_timing; int channel_offset = ap->port_no ? 
0x74: 0x70; unsigned int is_slave = (adev->devno != 0); pci_read_config_dword(pdev, channel_offset, &old_triflex_timing); triflex_timing = old_triflex_timing; switch(speed) { case XFER_MW_DMA_2: timing = 0x0103;break; case XFER_MW_DMA_1: timing = 0x0203;break; case XFER_MW_DMA_0: timing = 0x0808;break; case XFER_SW_DMA_2: case XFER_SW_DMA_1: case XFER_SW_DMA_0: timing = 0x0F0F;break; case XFER_PIO_4: timing = 0x0202;break; case XFER_PIO_3: timing = 0x0204;break; case XFER_PIO_2: timing = 0x0404;break; case XFER_PIO_1: timing = 0x0508;break; case XFER_PIO_0: timing = 0x0808;break; default: BUG(); } triflex_timing &= ~ (0xFFFF << (16 * is_slave)); triflex_timing |= (timing << (16 * is_slave)); if (triflex_timing != old_triflex_timing) pci_write_config_dword(pdev, channel_offset, triflex_timing); } /** * triflex_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Use the timing loader to set up the PIO mode. We have to do this * because DMA start/stop will only be called once DMA occurs. If there * has been no DMA then the PIO timings are still needed. */ static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev) { triflex_load_timing(ap, adev, adev->pio_mode); } /** * triflex_dma_start - DMA start callback * @qc: Command in progress * * Usually drivers set the DMA timing at the point the set_dmamode call * is made. Triflex however requires we load new timings on the * transition or keep matching PIO/DMA pairs (ie MWDMA2/PIO4 etc). * We load the DMA timings just before starting DMA and then restore * the PIO timing when the DMA is finished. 
*/ static void triflex_bmdma_start(struct ata_queued_cmd *qc) { triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode); ata_bmdma_start(qc); } /** * triflex_dma_stop - DMA stop callback * @ap: ATA interface * @adev: ATA device * * We loaded new timings in dma_start, as a result we need to restore * the PIO timings in dma_stop so that the next command issue gets the * right clock values. */ static void triflex_bmdma_stop(struct ata_queued_cmd *qc) { ata_bmdma_stop(qc); triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode); } static struct scsi_host_template triflex_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations triflex_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_start = triflex_bmdma_start, .bmdma_stop = triflex_bmdma_stop, .cable_detect = ata_cable_40wire, .set_piomode = triflex_set_piomode, .prereset = triflex_prereset, }; static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &triflex_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; ata_print_version_once(&dev->dev, DRV_VERSION); return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); } static const struct pci_device_id triflex[] = { { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), }, { }, }; #ifdef CONFIG_PM_SLEEP static int triflex_ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) { struct ata_host *host = pci_get_drvdata(pdev); int rc = 0; rc = ata_host_suspend(host, mesg); if (rc) return rc; /* * We must not disable or powerdown the device. * APM bios refuses to suspend if IDE is not accessible. 
*/ pci_save_state(pdev); return 0; } #endif static struct pci_driver triflex_pci_driver = { .name = DRV_NAME, .id_table = triflex, .probe = triflex_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM_SLEEP .suspend = triflex_ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; module_pci_driver(triflex_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Compaq Triflex"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, triflex); MODULE_VERSION(DRV_VERSION);
gpl-2.0
jrfastab/Linux-Kernel-QOS
drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
2310
3837
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <core/os.h> #include <core/class.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> #include <subdev/bios/dp.h> #include <subdev/bios/init.h> #include "nv50.h" static inline u32 nv94_sor_soff(struct dcb_output *outp) { return (ffs(outp->or) - 1) * 0x800; } static inline u32 nv94_sor_loff(struct dcb_output *outp) { return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80; } static inline u32 nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) { static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. 
*/ static const u8 nv94[] = { 16, 8, 0, 24 }; if (nv_device(priv)->chipset == 0xaf) return nvaf[lane]; return nv94[lane]; } static int nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp, int head, int pattern) { struct nv50_disp_priv *priv = (void *)disp; const u32 loff = nv94_sor_loff(outp); nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24); return 0; } static int nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp, int head, int link_nr, int link_bw, bool enh_frame) { struct nv50_disp_priv *priv = (void *)disp; const u32 soff = nv94_sor_soff(outp); const u32 loff = nv94_sor_loff(outp); u32 dpctrl = 0x00000000; u32 clksor = 0x00000000; u32 lane = 0; int i; dpctrl |= ((1 << link_nr) - 1) << 16; if (enh_frame) dpctrl |= 0x00004000; if (link_bw > 0x06) clksor |= 0x00040000; for (i = 0; i < link_nr; i++) lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3); nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor); nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl); nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane); return 0; } static int nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp, int head, int lane, int swing, int preem) { struct nouveau_bios *bios = nouveau_bios(disp); struct nv50_disp_priv *priv = (void *)disp; const u32 loff = nv94_sor_loff(outp); u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); u8 ver, hdr, cnt, len; struct nvbios_dpout info; struct nvbios_dpcfg ocfg; addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &ver, &hdr, &cnt, &len, &info); if (!addr) return -ENODEV; addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); if (!addr) return -EINVAL; nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift); nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift); nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); return 0; } const struct nouveau_dp_func nv94_sor_dp_func = { .pattern = nv94_sor_dp_pattern, .lnk_ctl = 
nv94_sor_dp_lnk_ctl, .drv_ctl = nv94_sor_dp_drv_ctl, };
gpl-2.0
thewisenerd/android_kernel_mediatek_sprout
drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
2566
31225
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * **************************************************************************** */ #include "../wifi.h" #include "../base.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "fw.h" #include "hal_btc.h" static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 0x7f8001fe, 0x788001e2, 0x71c001c7, 0x6b8001ae, 0x65400195, 0x5fc0017f, 0x5a400169, 0x55400155, 0x50800142, 0x4c000130, 0x47c0011f, 0x43c0010f, 0x40000100, 0x3c8000f2, 0x390000e4, 0x35c000d7, 0x32c000cb, 0x300000c0, 0x2d4000b5, 0x2ac000ab, 0x288000a2, 0x26000098, 0x24000090, 0x22000088, 0x20000080, 0x1e400079, 0x1c800072, 0x1b00006c, 0x19800066, 0x18000060, 0x16c0005b, 0x15800056, 0x14400051, 0x1300004c, 0x12000048, 0x11000044, 0x10000040, }; static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 
0x01}, {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} }; static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0a, 0x09, 0x05, 0x00, 
0x00, 0x00, 0x00}, {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} }; static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; dm_digtable->dig_enable_flag = true; dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; dm_digtable->cur_igvalue = 0x20; dm_digtable->pre_igvalue = 0x0; dm_digtable->cursta_cstate = DIG_STA_DISCONNECT; dm_digtable->presta_cstate = DIG_STA_DISCONNECT; dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT; dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; dm_digtable->rx_gain_max = DM_DIG_MAX; dm_digtable->rx_gain_min = DM_DIG_MIN; dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; } static u8 rtl_init_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; long rssi_val_min = 0; if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) && (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) rssi_val_min = (rtlpriv->dm.entry_min_undec_sm_pwdb > rtlpriv->dm.undec_sm_pwdb) ? 
rtlpriv->dm.undec_sm_pwdb : rtlpriv->dm.entry_min_undec_sm_pwdb; else rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT || dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } return (u8) rssi_val_min; } static void rtl8723ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); falsealm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail + falsealm_cnt->cnt_cck_fail); rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_parity_fail = %d, cnt_rate_illegal = %d, " "cnt_crc8_fail = %d, cnt_mcs_fail = 
%d\n", falsealm_cnt->cnt_parity_fail, falsealm_cnt->cnt_rate_illegal, falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", falsealm_cnt->cnt_ofdm_fail, falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all); } static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; u8 value_igi = dm_digtable->cur_igvalue; if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) value_igi--; else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1) value_igi += 0; else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2) value_igi++; else value_igi += 2; value_igi = clamp(value_igi, (u8)DM_DIG_FA_LOWER, (u8)DM_DIG_FA_UPPER); if (rtlpriv->falsealm_cnt.cnt_all > 10000) value_igi = 0x32; dm_digtable->cur_igvalue = value_igi; rtl8723ae_dm_write_dig(hw); } static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dgtbl = &rtlpriv->dm_digtable; if (rtlpriv->falsealm_cnt.cnt_all > dgtbl->fa_highthresh) { if ((dgtbl->back_val - 2) < dgtbl->back_range_min) dgtbl->back_val = dgtbl->back_range_min; else dgtbl->back_val -= 2; } else if (rtlpriv->falsealm_cnt.cnt_all < dgtbl->fa_lowthresh) { if ((dgtbl->back_val + 2) > dgtbl->back_range_max) dgtbl->back_val = dgtbl->back_range_max; else dgtbl->back_val += 2; } if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) > dgtbl->rx_gain_max) dgtbl->cur_igvalue = dgtbl->rx_gain_max; else if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) < dgtbl->rx_gain_min) dgtbl->cur_igvalue = dgtbl->rx_gain_min; else dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "rssi_val_min = %x back_val %x\n", dgtbl->rssi_val_min, dgtbl->back_val); rtl8723ae_dm_write_dig(hw); } static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) { struct rtl_priv 
*rtlpriv = rtl_priv(hw); /* continuation: tail of rtl8723ae_dm_initial_gain_multi_sta (header in previous chunk) */
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
	bool multi_sta = false;

	/* Multi-station handling only applies in IBSS (ad-hoc) mode. */
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		multi_sta = true;

	if ((!multi_sta) ||
	    (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT)) {
		rtlpriv->initialized = false;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		return;
	} else if (!rtlpriv->initialized) {
		/* First pass in multi-STA mode: reset DIG to a known IG value. */
		rtlpriv->initialized = true;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl8723ae_dm_write_dig(hw);
	}

	if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
		/* Move between ext-port stages based on min peer RSSI. */
		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
			if (dm_digtable->dig_ext_port_stage ==
			    DIG_EXT_PORT_STAGE_2) {
				dm_digtable->cur_igvalue = 0x20;
				rtl8723ae_dm_write_dig(hw);
			}
			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
		} else if (rssi_strength > dm_digtable->rssi_highthresh) {
			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
			rtl92c_dm_ctrl_initgain_by_fa(hw);
		}
	} else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl8723ae_dm_write_dig(hw);
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "curmultista_cstate = %x dig_ext_port_stage %x\n",
		 dm_digtable->curmultista_cstate,
		 dm_digtable->dig_ext_port_stage);
}

/*
 * Per-STA initial-gain control: when the connection state is stable (or
 * (re)connecting), track min PWDB and drive DIG by RSSI; on a state flip
 * to disconnect, reset the DIG table to defaults and write IG 0x20.
 */
static void rtl8723ae_dm_initial_gain_sta(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "presta_cstate = %x, cursta_cstate = %x\n",
		 dm_digtable->presta_cstate, dm_digtable->cursta_cstate);

	if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
	    dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
	    dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
		if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
			dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);
			rtl92c_dm_ctrl_initgain_by_rssi(hw);
		}
	} else {
		/* Connection state changed to disconnect: reset DIG. */
		dm_digtable->rssi_val_min = 0;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
		dm_digtable->cur_igvalue = 0x20;
		dm_digtable->pre_igvalue = 0;
		rtl8723ae_dm_write_dig(hw);
	}
}

/*
 * CCK packet-detection threshold tuning: pick a Low/High-RSSI PD stage
 * (with hysteresis on the 20/25 boundary) and program the CCK CCA and
 * system registers when the stage changes.
 */
static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
		dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);
		/* Hysteresis: threshold depends on the previous PD stage. */
		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			if (dm_digtable->rssi_val_min <= 25)
				dm_digtable->cur_cck_pd_state =
							CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
							CCK_PD_STAGE_HighRssi;
		} else {
			if (dm_digtable->rssi_val_min <= 20)
				dm_digtable->cur_cck_pd_state =
							CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
							CCK_PD_STAGE_HighRssi;
		}
	} else {
		dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
	}

	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
		if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			/* In low-RSSI stage, also adapt CCA to CCK FA count. */
			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
				dm_digtable->cur_cck_fa_state =
							CCK_FA_STAGE_High;
			else
				dm_digtable->cur_cck_fa_state =
							CCK_FA_STAGE_Low;

			if (dm_digtable->pre_cck_fa_state !=
			    dm_digtable->cur_cck_fa_state) {
				if (dm_digtable->cur_cck_fa_state ==
				    CCK_FA_STAGE_Low)
					rtl_set_bbreg(hw, RCCK0_CCA,
						      MASKBYTE2, 0x83);
				else
					rtl_set_bbreg(hw, RCCK0_CCA,
						      MASKBYTE2, 0xcd);
				dm_digtable->pre_cck_fa_state =
						dm_digtable->cur_cck_fa_state;
			}
			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
		} else {
			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
		}
		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
		 dm_digtable->cur_cck_pd_state);
}

/*
 * Top-level DIG step: derive connect/disconnect state from the link
 * state, then run the STA, multi-STA and CCK-PD sub-algorithms.
 * Skipped entirely while scanning.
 */
static void rtl8723ae_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	if (mac->act_scanning == true)
		return;

	if (mac->link_state >= MAC80211_LINKED)
		dm_digtable->cursta_cstate = DIG_STA_CONNECT;
	else
		dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;

	rtl8723ae_dm_initial_gain_sta(hw);
	rtl8723ae_dm_initial_gain_multi_sta(hw);
	rtl8723ae_dm_cck_packet_detection_thresh(hw);

	dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
}

/* DIG entry point: run only when both driver- and table-level flags allow. */
static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	if (rtlpriv->dm.dm_initialgain_enable == false)
		return;
	if (dm_digtable->dig_enable_flag == false)
		return;

	rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
}

/* Reset dynamic-TX-power state; the feature starts disabled. */
static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dynamic_txpower_enable = false;
	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}

/*
 * Dynamic TX power: pick a high-power level from the undecorated
 * smoothed PWDB (near-field thresholds with hysteresis gaps of 3/5 dB)
 * and reprogram the PHY TX power when the level changes.
 */
static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undec_sm_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "Not connected\n");
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Choose the PWDB source: per-entry minimum for ad-hoc/ext port,
	 * the default-port value for a normal STA link. */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n", undec_sm_pwdb);
		} else {
			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		}
	} else {
		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n", undec_sm_pwdb);
	}

	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}
	/* Note: PWDB in the [LVL1-5, LVL1) and [LVL2-3, LVL2) gaps keeps
	 * the previous level (intentional hysteresis). */

	if ((rtlpriv->dm.dynamic_txhighpower_lvl !=
	     rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
	}

	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}

/*
 * Commit the current initial-gain value to both OFDM AGC core
 * registers (paths A and B), but only when it actually changed.
 */
void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
		 "cur_igvalue = 0x%x, " "pre_igvalue = 0x%x, back_val = %d\n",
		 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
		 dm_digtable->back_val);

	if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
		rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
			      dm_digtable->cur_igvalue);
		rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
			      dm_digtable->cur_igvalue);
		dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
	}
}

/* Intentionally empty hook; kept so the watchdog call sequence is uniform. */
static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
{
}

/* Reset EDCA-turbo bookkeeping to the "off" state. */
void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.current_turbo_edca = false;
	rtlpriv->dm.is_any_nonbepkts = false;
	rtlpriv->dm.is_cur_rdlstate = false;
}

/*
 * EDCA turbo: when traffic is (almost) all best-effort, program an
 * aggressive BE EDCA parameter set chosen by traffic direction
 * (download-heavy vs upload-heavy); otherwise restore normal AC_BE
 * parameters via the HAL. BT-coexist values override the defaults.
 */
static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u64 cur_txok_cnt = 0;
	u64 cur_rxok_cnt = 0;
	u32 edca_be_ul = 0x5ea42b;
	u32 edca_be_dl = 0x5ea42b;
	bool bt_change_edca = false;

	if ((mac->last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
	    (mac->last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
		rtlpriv->dm.current_turbo_edca = false;
		mac->last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
		mac->last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
	}

	if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
		edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
		bt_change_edca = true;
	}

	if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
		/* NOTE(review): assigns bt_edca_dl into edca_be_ul — the
		 * symmetric branch above suggests edca_be_dl was intended.
		 * Confirm against the upstream rtlwifi fix before changing. */
		edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
		bt_change_edca = true;
	}

	if (mac->link_state != MAC80211_LINKED) {
		rtlpriv->dm.current_turbo_edca = false;
		return;
	}

	if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
		if (!(edca_be_ul & 0xffff0000))
			edca_be_ul |= 0x005e0000;
		if (!(edca_be_dl & 0xffff0000))
			edca_be_dl |= 0x005e0000;
	}

	if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
	    (!rtlpriv->dm.disable_framebursting))) {
		cur_txok_cnt = rtlpriv->stats.txbytesunicast -
			       mac->last_txok_cnt;
		cur_rxok_cnt = rtlpriv->stats.rxbytesunicast -
			       mac->last_rxok_cnt;

		/* RX more than 4x TX => treat as download-dominated. */
		if (cur_rxok_cnt > 4 * cur_txok_cnt) {
			if (!rtlpriv->dm.is_cur_rdlstate ||
			    !rtlpriv->dm.current_turbo_edca) {
				rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
						edca_be_dl);
				rtlpriv->dm.is_cur_rdlstate = true;
			}
		} else {
			if (rtlpriv->dm.is_cur_rdlstate ||
			    !rtlpriv->dm.current_turbo_edca) {
				rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
						edca_be_ul);
				rtlpriv->dm.is_cur_rdlstate = false;
			}
		}
		rtlpriv->dm.current_turbo_edca = true;
	} else {
		if (rtlpriv->dm.current_turbo_edca) {
			u8 tmp = AC0_BE;
			rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
						      (u8 *) (&tmp));
			rtlpriv->dm.current_turbo_edca = false;
		}
	}

	rtlpriv->dm.is_any_nonbepkts = false;
	mac->last_txok_cnt = rtlpriv->stats.txbytesunicast;
	mac->last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
}

/* Enable TX power tracking; actual init happens lazily elsewhere. */
static void rtl8723ae_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.txpower_tracking = true;
	rtlpriv->dm.txpower_trackinginit = false;

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "pMgntInfo->txpower_tracking = %d\n",
		 rtlpriv->dm.txpower_tracking);
}

/* Rate-adaptive init: driver controls the RA mask only in BYDRIVER mode. */
void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rate_adaptive *p_ra = &(rtlpriv->ra);

	p_ra->ratr_state = DM_RATR_STA_INIT;
	p_ra->pre_ratr_state = DM_RATR_STA_INIT;

	if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
		rtlpriv->dm.useramask = true;
	else
		rtlpriv->dm.useramask = false;
}

/*
 * Re-evaluate the rate-adaptive RSSI level (HIGH/MIDDLE/LOW, with
 * level-dependent hysteresis thresholds) and push a new rate table to
 * the firmware for the associated BSSID when the level changes.
 * STA mode only.
 */
static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rate_adaptive *p_ra = &(rtlpriv->ra);
	u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
	struct ieee80211_sta *sta = NULL;

	if (is_hal_stop(rtlhal)) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 " driver is going to unload\n");
		return;
	}

	if (!rtlpriv->dm.useramask) {
		RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
			 " driver does not control rate adaptive mask\n");
		return;
	}

	if (mac->link_state == MAC80211_LINKED &&
	    mac->opmode == NL80211_IFTYPE_STATION) {
		/* Thresholds shift with the previous level (hysteresis). */
		switch (p_ra->pre_ratr_state) {
		case DM_RATR_STA_HIGH:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_MIDDLE:
			high_rssithresh_for_ra = 55;
			low_rssithresh_for_ra = 20;
			break;
		case DM_RATR_STA_LOW:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 25;
			break;
		default:
			high_rssithresh_for_ra = 50;
			low_rssithresh_for_ra = 20;
			break;
		}

		if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_HIGH;
		else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			p_ra->ratr_state = DM_RATR_STA_LOW;

		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
				 rtlpriv->dm.undec_sm_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 p_ra->pre_ratr_state, p_ra->ratr_state);

			/* RCU protects the sta lookup/use window. */
			rcu_read_lock();
			sta = rtl_find_sta(hw, mac->bssid);
			if (sta)
				rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							p_ra->ratr_state);
			rcu_read_unlock();

			p_ra->pre_ratr_state = p_ra->ratr_state;
		}
	}
}

/* Reset the power-saving state table to its "unknown" sentinels. */
static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
	rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
	rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
	rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
	rtlpriv->dm_pstable.rssi_val_min = 0;
}

/*
 * RF power-save: decide RF_SAVE vs RF_NORMAL from min RSSI (30/25
 * hysteresis, unless force_in_normal), caching the original BB register
 * fields on first use so they can be restored when leaving save mode.
 */
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	if (!rtlpriv->reg_init) {
		/* Snapshot the fields we will clobber in RF_SAVE mode. */
		rtlpriv->reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				    MASKDWORD) & 0x1CC000) >> 14;
		rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
				    MASKDWORD) & BIT(3)) >> 3;
		rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				    MASKDWORD) & 0xFF000000) >> 24;
		rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
				    0xF000) >> 12;
		rtlpriv->reg_init = true;
	}

	if (!force_in_normal) {
		if (dm_pstable->rssi_val_min != 0) {
			if (dm_pstable->pre_rfstate == RF_NORMAL) {
				if (dm_pstable->rssi_val_min >= 30)
					dm_pstable->cur_rfstate = RF_SAVE;
				else
					dm_pstable->cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable->rssi_val_min <= 25)
					dm_pstable->cur_rfstate = RF_NORMAL;
				else
					dm_pstable->cur_rfstate = RF_SAVE;
			}
		} else {
			dm_pstable->cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable->cur_rfstate = RF_NORMAL;
	}

	if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
		if (dm_pstable->cur_rfstate == RF_SAVE) {
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      BIT(5), 0x1);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			/* Pulse bit 28 of 0x818 to latch the new settings. */
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* Restore the values snapshotted above. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1CC000, rtlpriv->reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      rtlpriv->reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, rtlpriv->reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      BIT(5), 0x0);
		}
		dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
	}
}

/*
 * Refresh the min-RSSI input for RF power saving (source chosen the
 * same way as in rtl8723ae_dm_dynamic_txpower) and run the RF-save
 * state machine without forcing normal mode.
 */
static void rtl8723ae_dm_dynamic_bpowersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	/* NOTE(review): these RT_TRACE calls pass DBG_LOUD where the other
	 * functions in this file pass a COMP_* component flag — probably a
	 * copy-paste slip; confirm against rtlwifi's RT_TRACE signature. */
	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		dm_pstable->rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "Not connected to any\n");
	}

	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			dm_pstable->rssi_val_min =
				rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		} else {
			dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		}
	} else {
		dm_pstable->rssi_val_min =
			rtlpriv->dm.entry_min_undec_sm_pwdb;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 dm_pstable->rssi_val_min);
	}

	rtl8723ae_dm_rf_saving(hw, false);
}

/* One-time init of all dynamic-mechanism sub-modules. */
void rtl8723ae_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl8723ae_dm_diginit(hw);
	rtl8723ae_dm_init_dynamic_txpower(hw);
	rtl8723ae_dm_init_edca_turbo(hw);
	rtl8723ae_dm_init_rate_adaptive_mask(hw);
	rtl8723ae_dm_initialize_txpower_tracking(hw);
	rtl8723ae_dm_init_dynamic_bpowersaving(hw);
}

/*
 * Periodic DM watchdog: run every dynamic mechanism, but only when the
 * RF is on, the firmware is awake (and not in P2P PS), and no RF state
 * change is in progress.
 */
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *) (&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *) (&fw_ps_awake));

	if (ppsc->p2p_ps_info.p2p_ps_mode)
		fw_ps_awake = false;

	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl8723ae_dm_pwdmonitor(hw);
		rtl8723ae_dm_dig(hw);
		rtl8723ae_dm_false_alarm_counter_statistics(hw);
		rtl8723ae_dm_dynamic_bpowersaving(hw);
		rtl8723ae_dm_dynamic_txpower(hw);
		rtl8723ae_dm_refresh_rate_adaptive_mask(hw);
		rtl8723ae_dm_bt_coexist(hw);
		rtl8723ae_dm_check_edca_turbo(hw);
	}
	if (rtlpcipriv->bt_coexist.init_set)
		rtl_write_byte(rtlpriv, 0x76e, 0xc);
}

/*
 * One-time BT-coexist setup: save the original RF 0x1e/0x1f register
 * values, zero the coexist state machine, and enable the hardware
 * counter statistics registers.
 */
static void rtl8723ae_dm_init_bt_coexist(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);

	rtlpcipriv->bt_coexist.bt_rfreg_origin_1e =
		rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK1, 0xfffff);
	rtlpcipriv->bt_coexist.bt_rfreg_origin_1f =
		rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK2, 0xf0);

	rtlpcipriv->bt_coexist.cstate = 0;
	rtlpcipriv->bt_coexist.previous_state = 0;
	rtlpcipriv->bt_coexist.cstate_h = 0;
	rtlpcipriv->bt_coexist.previous_state_h = 0;
	rtlpcipriv->bt_coexist.lps_counter = 0;

	/* Enable counter statistics */
	rtl_write_byte(rtlpriv, 0x76e, 0x4);
	rtl_write_byte(rtlpriv, 0x778, 0x3);
	rtl_write_byte(rtlpriv, 0x40, 0x20);

	rtlpcipriv->bt_coexist.init_set = true;
}

/* BT-coexist watchdog step; no-op when no BT radio is present. */
void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u8 tmp_byte = 0;

	if (!rtlpcipriv->bt_coexist.bt_coexistence) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[DM]{BT], BT not exist!!\n");
		return;
	}

	if (!rtlpcipriv->bt_coexist.init_set) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[DM][BT], rtl8723ae_dm_bt_coexist()\n");
		rtl8723ae_dm_init_bt_coexist(hw);
	}

	tmp_byte = rtl_read_byte(rtlpriv, 0x40);
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
		 "[DM][BT], 0x40 is 0x%x", tmp_byte);
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[DM][BT], bt_dm_coexist start");
	rtl8723ae_dm_bt_coexist_8723(hw);
}
gpl-2.0
rutvik95/android_kernel_frostbite
drivers/watchdog/imx2_wdt.c
2566
9347
/*
 * Watchdog driver for IMX2 and later processors
 *
 * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
 *
 * some parts adapted by similar drivers from Darius Augulis and Vladimir
 * Zapolskiy, additional improvements by Wim Van Sebroeck.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * NOTE: MX1 has a slightly different Watchdog than MX2 and later:
 *
 *			MX1:		MX2+:
 *			----		-----
 * Registers:		32-bit		16-bit
 * Stopable timer:	Yes		No
 * Need to enable clk:	No		Yes
 * Halt on suspend:	Manual		Can be automatic
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/clk.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <mach/hardware.h>

#define DRIVER_NAME "imx2-wdt"

/* WDOG register map (16-bit registers on MX2+). */
#define IMX2_WDT_WCR		0x00		/* Control Register */
#define IMX2_WDT_WCR_WT		(0xFF << 8)	/* -> Watchdog Timeout Field */
#define IMX2_WDT_WCR_WRE	(1 << 3)	/* -> WDOG Reset Enable */
#define IMX2_WDT_WCR_WDE	(1 << 2)	/* -> Watchdog Enable */

#define IMX2_WDT_WSR		0x02		/* Service Register */
#define IMX2_WDT_SEQ1		0x5555		/* -> service sequence 1 */
#define IMX2_WDT_SEQ2		0xAAAA		/* -> service sequence 2 */

#define IMX2_WDT_MAX_TIME	128
#define IMX2_WDT_DEFAULT_TIME	60		/* in seconds */

/* Seconds -> WT field value (0.5 s granularity, biased by -0.5 s).
 * NOTE(review): 's' is used unparenthesized; callers here only pass
 * plain identifiers, but (s) would be the safer macro form. */
#define WDOG_SEC_TO_COUNT(s)	((s * 2 - 1) << 8)

#define IMX2_WDT_STATUS_OPEN	0
#define IMX2_WDT_STATUS_STARTED	1
#define IMX2_WDT_EXPECT_CLOSE	2

/* Driver-wide singleton state (the SoC has a single watchdog). */
static struct {
	struct clk *clk;
	void __iomem *base;
	unsigned timeout;
	unsigned long status;
	struct timer_list timer;	/* Pings the watchdog when closed */
} imx2_wdt;

static struct miscdevice imx2_wdt_miscdev;

static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static unsigned timeout = IMX2_WDT_DEFAULT_TIME;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
				__MODULE_STRING(IMX2_WDT_DEFAULT_TIME) ")");

static const struct watchdog_info imx2_wdt_info = {
	.identity = "imx2+ watchdog",
	.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
};

/* Program WCR (timeout, reset-on-expiry) and then set WDE to start.
 * Once WDE is set the MX2+ watchdog cannot be stopped again. */
static inline void imx2_wdt_setup(void)
{
	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);

	/* Strip the old watchdog Time-Out value */
	val &= ~IMX2_WDT_WCR_WT;
	/* Generate reset if WDOG times out */
	val &= ~IMX2_WDT_WCR_WRE;
	/* Keep Watchdog Disabled */
	val &= ~IMX2_WDT_WCR_WDE;
	/* Set the watchdog's Time-Out value */
	val |= WDOG_SEC_TO_COUNT(imx2_wdt.timeout);

	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);

	/* enable the watchdog */
	val |= IMX2_WDT_WCR_WDE;
	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
}

/* Service (kick) the watchdog with the mandatory two-write sequence. */
static inline void imx2_wdt_ping(void)
{
	__raw_writew(IMX2_WDT_SEQ1, imx2_wdt.base + IMX2_WDT_WSR);
	__raw_writew(IMX2_WDT_SEQ2, imx2_wdt.base + IMX2_WDT_WSR);
}

/* Kernel-timer callback that keeps the hardware alive while userspace
 * has the device closed (the MX2+ watchdog cannot be disabled). */
static void imx2_wdt_timer_ping(unsigned long arg)
{
	/* ping it every imx2_wdt.timeout / 2 seconds to prevent reboot */
	imx2_wdt_ping();
	mod_timer(&imx2_wdt.timer, jiffies + imx2_wdt.timeout * HZ / 2);
}

/* First start: enable clock and configure; restart: cancel self-ping. */
static void imx2_wdt_start(void)
{
	if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		/* at our first start we enable clock and do initialisations */
		clk_enable(imx2_wdt.clk);

		imx2_wdt_setup();
	} else	/* delete the timer that pings the watchdog after close */
		del_timer_sync(&imx2_wdt.timer);

	/* Watchdog is enabled - time to reload the timeout value */
	imx2_wdt_ping();
}

static void imx2_wdt_stop(void)
{
	/* we don't need a clk_disable, it cannot be disabled once started.
	 * We use a timer to ping the watchdog while /dev/watchdog is closed */
	imx2_wdt_timer_ping(0);
}

/* Update only the WT field of WCR with the new timeout (seconds). */
static void imx2_wdt_set_timeout(int new_timeout)
{
	u16 val = __raw_readw(imx2_wdt.base + IMX2_WDT_WCR);

	/* set the new timeout value in the WCR WT field */
	val &= ~IMX2_WDT_WCR_WT;
	val |= WDOG_SEC_TO_COUNT(new_timeout);
	__raw_writew(val, imx2_wdt.base + IMX2_WDT_WCR);
}

/* Single-open device; opening starts (or re-arms) the watchdog. */
static int imx2_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status))
		return -EBUSY;

	imx2_wdt_start();
	return nonseekable_open(inode, file);
}

/* Magic-close semantics: only a prior 'V' write (and !nowayout) hands
 * the hardware over to the self-ping timer. */
static int imx2_wdt_close(struct inode *inode, struct file *file)
{
	if (test_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status) && !nowayout)
		imx2_wdt_stop();
	else {
		dev_crit(imx2_wdt_miscdev.parent,
			"Unexpected close: Expect reboot!\n");
		imx2_wdt_ping();
	}

	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	clear_bit(IMX2_WDT_STATUS_OPEN, &imx2_wdt.status);
	return 0;
}

/* Standard watchdog ioctl set (see Documentation/watchdog). */
static long imx2_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_value;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &imx2_wdt_info,
			sizeof(struct watchdog_info)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_KEEPALIVE:
		imx2_wdt_ping();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;
		if ((new_value < 1) || (new_value > IMX2_WDT_MAX_TIME))
			return -EINVAL;
		imx2_wdt_set_timeout(new_value);
		imx2_wdt.timeout = new_value;
		imx2_wdt_ping();

		/* Fallthrough to return current value */
	case WDIOC_GETTIMEOUT:
		return put_user(imx2_wdt.timeout, p);

	default:
		return -ENOTTY;
	}
}

/* Any write pings; scan the data for the magic 'V' close character. */
static ssize_t imx2_wdt_write(struct file *file, const char __user *data,
						size_t len, loff_t *ppos)
{
	size_t i;
	char c;

	if (len == 0)	/* Can we see this even ? */
		return 0;

	clear_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	/* scan to see whether or not we got the magic character */
	for (i = 0; i != len; i++) {
		if (get_user(c, data + i))
			return -EFAULT;
		if (c == 'V')
			set_bit(IMX2_WDT_EXPECT_CLOSE, &imx2_wdt.status);
	}

	imx2_wdt_ping();
	return len;
}

static const struct file_operations imx2_wdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = imx2_wdt_ioctl,
	.open = imx2_wdt_open,
	.release = imx2_wdt_close,
	.write = imx2_wdt_write,
};

static struct miscdevice imx2_wdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &imx2_wdt_fops,
};

/* Probe: map MMIO, grab the clock, clamp the module timeout parameter,
 * set up the close-time ping timer and register /dev/watchdog. */
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
	int ret;
	int res_size;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get device resources\n");
		return -ENODEV;
	}

	res_size = resource_size(res);
	if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
		res->name)) {
		/* NOTE(review): %d for res->start (resource_size_t) —
		 * %pa or a cast would be the correct format here. */
		dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
			res_size, res->start);
		return -ENOMEM;
	}

	imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
	if (!imx2_wdt.base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -ENOMEM;
	}

	imx2_wdt.clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(imx2_wdt.clk)) {
		dev_err(&pdev->dev, "can't get Watchdog clock\n");
		return PTR_ERR(imx2_wdt.clk);
	}

	imx2_wdt.timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
	if (imx2_wdt.timeout != timeout)
		dev_warn(&pdev->dev, "Initial timeout out of range! "
			"Clamped from %u to %u\n", timeout, imx2_wdt.timeout);

	setup_timer(&imx2_wdt.timer, imx2_wdt_timer_ping, 0);

	imx2_wdt_miscdev.parent = &pdev->dev;
	ret = misc_register(&imx2_wdt_miscdev);
	if (ret)
		goto fail;

	dev_info(&pdev->dev,
		"IMX2+ Watchdog Timer enabled. timeout=%ds (nowayout=%d)\n",
						imx2_wdt.timeout, nowayout);
	return 0;

fail:
	imx2_wdt_miscdev.parent = NULL;
	clk_put(imx2_wdt.clk);
	return ret;
}

/* Remove: if the watchdog was ever started it will still bite — warn. */
static int __exit imx2_wdt_remove(struct platform_device *pdev)
{
	misc_deregister(&imx2_wdt_miscdev);

	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		del_timer_sync(&imx2_wdt.timer);

		dev_crit(imx2_wdt_miscdev.parent,
			"Device removed: Expect reboot!\n");
	} else
		clk_put(imx2_wdt.clk);

	imx2_wdt_miscdev.parent = NULL;
	return 0;
}

/* Shutdown: stretch the timeout to the maximum to give the system time
 * to go down cleanly; the hardware cannot be disabled. */
static void imx2_wdt_shutdown(struct platform_device *pdev)
{
	if (test_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
		/* we are running, we need to delete the timer but will give
		 * max timeout before reboot will take place */
		del_timer_sync(&imx2_wdt.timer);
		imx2_wdt_set_timeout(IMX2_WDT_MAX_TIME);
		imx2_wdt_ping();

		dev_crit(imx2_wdt_miscdev.parent,
			"Device shutdown: Expect reboot!\n");
	}
}

static struct platform_driver imx2_wdt_driver = {
	.remove		= __exit_p(imx2_wdt_remove),
	.shutdown	= imx2_wdt_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init imx2_wdt_init(void)
{
	return platform_driver_probe(&imx2_wdt_driver, imx2_wdt_probe);
}
module_init(imx2_wdt_init);

static void __exit imx2_wdt_exit(void)
{
	platform_driver_unregister(&imx2_wdt_driver);
}
module_exit(imx2_wdt_exit);

MODULE_AUTHOR("Wolfram Sang");
MODULE_DESCRIPTION("Watchdog driver for IMX2 and later");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
CyanogenMod/android_kernel_cyanogen_msm8994
fs/hpfs/dnode.c
2822
31194
/* * linux/fs/hpfs/dnode.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * handling directory dnode tree - adding, deleteing & searching for dirents */ #include "hpfs_fn.h" static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); int i = 1; for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i; i++; } printk("HPFS: get_pos: not_found\n"); return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } void hpfs_add_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i = 0; loff_t **ppos; if (hpfs_inode->i_rddir_off) for (; hpfs_inode->i_rddir_off[i]; i++) if (hpfs_inode->i_rddir_off[i] == pos) return; if (!(i&0x0f)) { if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) { printk("HPFS: out of memory for position list\n"); return; } if (hpfs_inode->i_rddir_off) { memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t)); kfree(hpfs_inode->i_rddir_off); } hpfs_inode->i_rddir_off = ppos; } hpfs_inode->i_rddir_off[i] = pos; hpfs_inode->i_rddir_off[i + 1] = NULL; } void hpfs_del_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i, **j; if (!hpfs_inode->i_rddir_off) goto not_f; for (i = hpfs_inode->i_rddir_off; *i; i++) if (*i == pos) goto fnd; goto not_f; fnd: for (j = i + 1; *j; j++) ; *i = *(j - 1); *(j - 1) = NULL; if (j - 1 == hpfs_inode->i_rddir_off) { kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } return; not_f: /*printk("HPFS: warning: position pointer %p->%08x not found\n", pos, (int)*pos);*/ return; } static void for_all_poss(struct inode *inode, void (*f)(loff_t *, loff_t, loff_t), loff_t p1, loff_t p2) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); loff_t **i; if (!hpfs_inode->i_rddir_off) return; for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2); 
return; } static void hpfs_pos_subst(loff_t *p, loff_t f, loff_t t) { if (*p == f) *p = t; } /*void hpfs_hpfs_pos_substd(loff_t *p, loff_t f, loff_t t) { if ((*p & ~0x3f) == (f & ~0x3f)) *p = (t & ~0x3f) | (*p & 0x3f); }*/ static void hpfs_pos_ins(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) + c; if (n > 0x3f) printk("HPFS: hpfs_pos_ins: %08x + %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static void hpfs_pos_del(loff_t *p, loff_t d, loff_t c) { if ((*p & ~0x3f) == (d & ~0x3f) && (*p & 0x3f) >= (d & 0x3f)) { int n = (*p & 0x3f) - c; if (n < 1) printk("HPFS: hpfs_pos_ins: %08x - %d\n", (int)*p, (int)c >> 8); else *p = (*p & ~0x3f) | n; } } static struct hpfs_dirent *dnode_pre_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL, *deee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { deee = dee; dee = de; } return deee; } static struct hpfs_dirent *dnode_last_de(struct dnode *d) { struct hpfs_dirent *de, *de_end, *dee = NULL; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { dee = de; } return dee; } static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno ptr) { struct hpfs_dirent *de; if (!(de = dnode_last_de(d))) { hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self)); return; } if (hpfs_sb(s)->sb_chk) { if (de->down) { hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x", le32_to_cpu(d->self), de_down_pointer(de)); return; } if (le16_to_cpu(de->length) != 32) { hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self)); return; } } if (ptr) { le32_add_cpu(&d->first_free, 4); if (le32_to_cpu(d->first_free) > 2048) { hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self)); le32_add_cpu(&d->first_free, -4); return; } de->length = cpu_to_le16(36); de->down = 
1; *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr); } } /* Add an entry to dnode and don't care if it grows over 2048 bytes */ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, const unsigned char *name, unsigned namelen, secno down_ptr) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); unsigned d_size = de_size(namelen, down_ptr); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last); if (!c) { hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self)); return NULL; } if (c < 0) break; } memmove((char *)de + d_size, de, (char *)de_end - (char *)de); memset(de, 0, d_size); if (down_ptr) { *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); de->down = 1; } de->length = cpu_to_le16(d_size); de->not_8x3 = hpfs_is_name_long(name, namelen); de->namelen = namelen; memcpy(de->name, name, namelen); le32_add_cpu(&d->first_free, d_size); return de; } /* Delete dirent and don't care about its subtree */ static void hpfs_delete_de(struct super_block *s, struct dnode *d, struct hpfs_dirent *de) { if (de->last) { hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self)); return; } d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length)); memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de); } static void fix_up_ptrs(struct super_block *s, struct dnode *d) { struct hpfs_dirent *de; struct hpfs_dirent *de_end = dnode_end_de(d); dnode_secno dno = le32_to_cpu(d->self); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) if (de->down) { struct quad_buffer_head qbh; struct dnode *dd; if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) { if (le32_to_cpu(dd->up) != dno || dd->root_dnode) { dd->up = cpu_to_le32(dno); dd->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); } hpfs_brelse4(&qbh); } } } /* Add an entry to 
dnode and do dnode splitting if required */
/*
 * hpfs_add_to_dnode() - insert @new_de (named @name) into dnode @dno,
 * splitting dnodes up the tree as needed. Returns 0 on success, 1 on error.
 *
 * If the entry fits (first_free + entry size <= 2048) it is inserted
 * directly and directory positions are patched via for_all_poss().
 * Otherwise the dnode is rebuilt in the scratch buffer @nd (0x924 bytes,
 * the maximum dnode size after adding a max-length name, allocated once),
 * the lower half is copied into a freshly allocated dnode @ad, the median
 * entry becomes the new (@name,@new_de,@down_ptr) to push into the parent
 * (goto go_up), and fix_up_ptrs() repairs the children's up pointers.
 * When the root itself splits, a new root @rd is allocated, the fnode's
 * first extent is repointed at it, and insertion restarts at go_up_a.
 */
static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de, dnode_secno down_ptr) { struct quad_buffer_head qbh, qbh1, qbh2; struct dnode *d, *ad, *rd, *nd = NULL; dnode_secno adno, rdno; struct hpfs_dirent *de; struct hpfs_dirent nde; unsigned char *nname; int h; int pos; struct buffer_head *bh; struct fnode *fnode; int c1, c2 = 0; if (!(nname = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't add to dnode\n"); return 1; } go_up: if (namelen >= 256) { hpfs_error(i->i_sb, "hpfs_add_to_dnode: namelen == %d", namelen); kfree(nd); kfree(nname); return 1; } if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) { kfree(nd); kfree(nname); return 1; } go_up_a: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_to_dnode")) { hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) { loff_t t; copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de); t = get_pos(d, de); for_all_poss(i, hpfs_pos_ins, t, 1); for_all_poss(i, hpfs_pos_subst, 4, t); for_all_poss(i, hpfs_pos_subst, 5, t + 1); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 0; } if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) { /* 0x924 is a max size of dnode after adding a dirent with max name length. We alloc this only once. There must not be any error while splitting dnodes, otherwise the whole directory, not only file we're adding, would be lost.
*/ printk("HPFS: out of memory for dnode splitting\n"); hpfs_brelse4(&qbh); kfree(nname); return 1; } memcpy(nd, d, le32_to_cpu(d->first_free)); copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de); for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1); h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10; if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; pos = 1; for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) { copy_de(hpfs_add_de(i->i_sb, ad, de->name, de->namelen, de->down ? de_down_pointer(de) : 0), de); for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, ((loff_t)adno << 4) | pos); pos++; } copy_de(new_de = &nde, de); memcpy(nname, de->name, de->namelen); name = nname; namelen = de->namelen; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); down_ptr = adno; set_last_pointer(i->i_sb, ad, de->down ?
de_down_pointer(de) : 0); de = de_next_de(de); memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de); le32_add_cpu(&nd->first_free, -((char *)de - (char *)nd - 20)); memcpy(d, nd, le32_to_cpu(nd->first_free)); for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos); fix_up_ptrs(i->i_sb, ad); if (!d->root_dnode) { ad->up = d->up; dno = le32_to_cpu(ad->up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); goto go_up; } if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) { hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted"); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); kfree(nd); kfree(nname); return 1; } i->i_size += 2048; i->i_blocks += 4; rd->root_dnode = 1; rd->up = d->up; if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) { hpfs_free_dnode(i->i_sb, rdno); hpfs_brelse4(&qbh); hpfs_brelse4(&qbh1); hpfs_brelse4(&qbh2); kfree(nd); kfree(nname); return 1; } fnode->u.external[0].disk_secno = cpu_to_le32(rdno); mark_buffer_dirty(bh); brelse(bh); hpfs_i(i)->i_dno = rdno; d->up = ad->up = cpu_to_le32(rdno); d->root_dnode = ad->root_dnode = 0; hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); qbh = qbh2; set_last_pointer(i->i_sb, rd, dno); dno = rdno; d = rd; goto go_up_a; } /* * Add an entry to directory btree. * I hate such crazy directory structure. * It's easy to read but terrible to write. * I wrote this directory code 4 times. * I hope, now it's finally bug-free.
*/
/*
 * hpfs_add_dirent() - public entry point: add @new_de named @name to the
 * directory btree of inode @i. Walks down from hpfs_inode->i_dno comparing
 * names (following down pointers on c < 0) to find the leaf dnode.
 * Returns -1 if the name already exists, 1 on error or when
 * hpfs_check_free_dnodes() says there are not enough free dnodes,
 * otherwise the result of hpfs_add_to_dnode() (0 on success).
 * Bumps i_version on a real insertion attempt.
 */
int hpfs_add_dirent(struct inode *i, const unsigned char *name, unsigned namelen, struct hpfs_dirent *new_de) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct dnode *d; struct hpfs_dirent *de, *de_end; struct quad_buffer_head qbh; dnode_secno dno; int c; int c1, c2 = 0; dno = hpfs_inode->i_dno; down: if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "hpfs_add_dirent")) return 1; if (!(d = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 1; de_end = dnode_end_de(d); for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) { if (!(c = hpfs_compare_names(i->i_sb, name, namelen, de->name, de->namelen, de->last))) { hpfs_brelse4(&qbh); return -1; } if (c < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto down; } break; } } hpfs_brelse4(&qbh); if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) { c = 1; goto ret; } i->i_version++; c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); ret: return c; } /* * Find dirent with higher name in 'from' subtree and move it to 'to' dnode.
* Return the dnode we moved from (to be checked later if it's empty) */
/*
 * move_to_top() - find the highest-sorting dirent in the subtree rooted at
 * @from (follow last-entry down pointers to a leaf), detach it there, and
 * re-insert it into @to with @from as its down pointer. Empty dnodes met on
 * the way back up are freed (second while loop). Returns the dnode the
 * entry was taken from (caller checks it for emptiness) or 0 on error.
 */
static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) { dnode_secno dno, ddno; dnode_secno chk_up = to; struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de, *nde; int a; loff_t t; int c1, c2 = 0; dno = from; while (1) { if (hpfs_sb(i->i_sb)->sb_chk) if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "move_to_top")) return 0; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0; if (hpfs_sb(i->i_sb)->sb_chk) { if (le32_to_cpu(dnode->up) != chk_up) { hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x", dno, chk_up, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh); return 0; } chk_up = dno; } if (!(de = dnode_last_de(dnode))) { hpfs_error(i->i_sb, "move_to_top: dnode %08x has no last de", dno); hpfs_brelse4(&qbh); return 0; } if (!de->down) break; dno = de_down_pointer(de); hpfs_brelse4(&qbh); } while (!(de = dnode_pre_last_de(dnode))) { dnode_secno up = le32_to_cpu(dnode->up); hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, 5); if (up == to) return to; if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return 0; if (dnode->root_dnode) { hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); hpfs_brelse4(&qbh); return 0; } de = dnode_last_de(dnode); if (!de || !de->down) { hpfs_error(i->i_sb, "move_to_top: dnode %08x doesn't point down to %08x", up, dno); hpfs_brelse4(&qbh); return 0; } le32_add_cpu(&dnode->first_free, -4); le16_add_cpu(&de->length, -4); de->down = 0; hpfs_mark_4buffers_dirty(&qbh); dno = up; } t = get_pos(dnode, de); for_all_poss(i, hpfs_pos_subst, t, 4); for_all_poss(i, hpfs_pos_subst, t + 1, 5); if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted"); hpfs_brelse4(&qbh); return 0; } memcpy(nde, de,
le16_to_cpu(de->length)); ddno = de->down ? de_down_pointer(de) : 0; hpfs_delete_de(i->i_sb, dnode, de); set_last_pointer(i->i_sb, dnode, ddno); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); kfree(nde); if (a) return 0; return dno; }
/*
 * delete_empty_dnode() - if dnode @dno has no real entries left
 * (first_free == 52 or 56, i.e. only the end marker, possibly with a down
 * pointer), remove it from the tree: reconnect its single child to the
 * parent, or, for the root, repoint the fnode at the child. Then rebalance
 * by pulling an entry from a sibling via the parent; loops upward through
 * try_it_again. Intricate on purpose — chkdsk dislikes empty dnodes.
 */
/* * Check if a dnode is empty and delete it from the tree * (chkdsk doesn't like empty dnodes) */ static void delete_empty_dnode(struct inode *i, dnode_secno dno) { struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct quad_buffer_head qbh; struct dnode *dnode; dnode_secno down, up, ndown; int p; struct hpfs_dirent *de; int c1, c2 = 0; try_it_again: if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return; if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return; if (le32_to_cpu(dnode->first_free) > 56) goto end; if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) { struct hpfs_dirent *de_end; int root = dnode->root_dnode; up = le32_to_cpu(dnode->up); de = dnode_first_de(dnode); down = de->down ?
de_down_pointer(de) : 0; if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) { hpfs_error(i->i_sb, "delete_empty_dnode: root dnode %08x is empty", dno); goto end; } hpfs_brelse4(&qbh); hpfs_free_dnode(i->i_sb, dno); i->i_size -= 2048; i->i_blocks -= 4; if (root) { struct fnode *fnode; struct buffer_head *bh; struct dnode *d1; struct quad_buffer_head qbh1; if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) { hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx", dno, up, (unsigned long)i->i_ino); return; } if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); d1->root_dnode = 1; hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) { fnode->u.external[0].disk_secno = cpu_to_le32(down); mark_buffer_dirty(bh); brelse(bh); } hpfs_inode->i_dno = down; for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, (loff_t) 12); return; } if (!(dnode = hpfs_map_dnode(i->i_sb, up, &qbh))) return; p = 1; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de), p++) if (de->down) if (de_down_pointer(de) == dno) goto fnd; hpfs_error(i->i_sb, "delete_empty_dnode: pointer to dnode %08x not found in dnode %08x", dno, up); goto end; fnd: for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p); if (!down) { de->down = 0; le16_add_cpu(&de->length, -4); le32_add_cpu(&dnode->first_free, -4); memmove(de_next_de(de), (char *)de_next_de(de) + 4, (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de)); } else { struct dnode *d1; struct quad_buffer_head qbh1; *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down; if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) { d1->up = cpu_to_le32(up); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } } } else { hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free)); goto end; } if (!de->last) { struct
hpfs_dirent *de_next = de_next_de(de); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; if (!de_next->down) goto endm; ndown = de_down_pointer(de_next); if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); goto endm; } memcpy(de_cp, de, le16_to_cpu(de->length)); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4); for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1); if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, de_cp->down ? de_down_pointer(de_cp) : 0); /*printk("UP-TO-DNODE: %08x (ndown = %08x, down = %08x, dno = %08x)\n", up, ndown, down, dno);*/ dno = up; kfree(de_cp); goto try_it_again; } else { struct hpfs_dirent *de_prev = dnode_pre_last_de(dnode); struct hpfs_dirent *de_cp; struct dnode *d1; struct quad_buffer_head qbh1; dnode_secno dlp; if (!de_prev) { hpfs_error(i->i_sb, "delete_empty_dnode: empty dnode %08x", up); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); dno = up; goto try_it_again; } if (!de_prev->down) goto endm; ndown = de_down_pointer(de_prev); if ((d1 = hpfs_map_dnode(i->i_sb, ndown, &qbh1))) { struct hpfs_dirent *del = dnode_last_de(d1); dlp = del->down ?
de_down_pointer(del) : 0; if (!dlp && down) { if (le32_to_cpu(d1->first_free) > 2044) { if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: terminating balancing operation\n"); } hpfs_brelse4(&qbh1); goto endm; } if (hpfs_sb(i->i_sb)->sb_chk >= 2) { printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n"); printk("HPFS: warning: goin'on\n"); } le16_add_cpu(&del->length, 4); del->down = 1; le32_add_cpu(&d1->first_free, 4); } if (dlp && !down) { le16_add_cpu(&del->length, -4); del->down = 0; le32_add_cpu(&d1->first_free, -4); } else if (down) *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); } else goto endm; if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { printk("HPFS: out of memory for dtree balancing\n"); hpfs_brelse4(&qbh1); goto endm; } hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length)); hpfs_delete_de(i->i_sb, dnode, de_prev); if (!de_prev->down) { le16_add_cpu(&de_prev->length, 4); de_prev->down = 1; le32_add_cpu(&dnode->first_free, 4); } *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1)); if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) { d1->up = cpu_to_le32(ndown); hpfs_mark_4buffers_dirty(&qbh1); hpfs_brelse4(&qbh1); } hpfs_add_to_dnode(i, ndown, de_cp->name, de_cp->namelen, de_cp, dlp); dno = up; kfree(de_cp); goto try_it_again; } endm: hpfs_mark_4buffers_dirty(&qbh); end: hpfs_brelse4(&qbh); }
/*
 * hpfs_remove_dirent() - delete @de from dnode @dno; @qbh (holding the
 * mapped dnode) is consumed on all paths. Returns 0 on success, 1 on
 * error, 2 when @depth != 0 and hpfs_check_free_dnodes() reports too few
 * free dnodes for a safe structural delete (caller may retry). If the
 * entry had a down pointer, its predecessor is pulled up via move_to_top()
 * and the possibly-emptied source dnode is cleaned by delete_empty_dnode().
 */
/* Delete dirent from directory */ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de, struct quad_buffer_head *qbh, int depth) { struct dnode *dnode = qbh->data; dnode_secno
down = 0; loff_t t; if (de->first || de->last) { hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno); hpfs_brelse4(qbh); return 1; } if (de->down) down = de_down_pointer(de); if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) { if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) { hpfs_brelse4(qbh); return 2; } } i->i_version++; for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); hpfs_delete_de(i->i_sb, dnode, de); hpfs_mark_4buffers_dirty(qbh); hpfs_brelse4(qbh); if (down) { dnode_secno a = move_to_top(i, down, dno); for_all_poss(i, hpfs_pos_subst, 5, t); if (a) delete_empty_dnode(i, a); return !a; } delete_empty_dnode(i, dno); return 0; }
/*
 * hpfs_count_dnodes() - iteratively (no recursion) traverse the dnode tree
 * rooted at @dno, incrementing the optional counters: *n_dnodes per dnode,
 * *n_subdirs per directory entry, *n_items per real (non-first, non-last)
 * entry. 'odno' tracks where we came from to verify up pointers and to
 * resume after returning from a child ('odno == -1' marks an upward move).
 */
void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes, int *n_subdirs, int *n_items) { struct dnode *dnode; struct quad_buffer_head qbh; struct hpfs_dirent *de; dnode_secno ptr, odno = 0; int c1, c2 = 0; int d1, d2 = 0; go_down: if (n_dnodes) (*n_dnodes)++; if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "hpfs_count_dnodes #1")) return; ptr = 0; go_up: if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno) hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up)); de = dnode_first_de(dnode); if (ptr) while(1) { if (de->down) if (de_down_pointer(de) == ptr) goto process_de; if (de->last) { hpfs_brelse4(&qbh); hpfs_error(s, "hpfs_count_dnodes: pointer to dnode %08x not found in dnode %08x, got here from %08x", ptr, dno, odno); return; } de = de_next_de(de); } next_de: if (de->down) { odno = dno; dno = de_down_pointer(de); hpfs_brelse4(&qbh); goto go_down; } process_de: if (!de->first && !de->last && de->directory && n_subdirs) (*n_subdirs)++; if (!de->first && !de->last && n_items) (*n_items)++; if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de;
ptr = dno; dno = le32_to_cpu(dnode->up); if (dnode->root_dnode) { hpfs_brelse4(&qbh); return; } hpfs_brelse4(&qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, ptr, &d1, &d2, "hpfs_count_dnodes #2")) return; odno = -1; goto go_up; }
/*
 * map_nth_dirent() - map dnode @dno and return its @n-th dirent (1-based).
 * On success the quad buffer stays held in *qbh (and *dn, if non-NULL,
 * gets the dnode); on failure the buffer is released and NULL is returned.
 */
static struct hpfs_dirent *map_nth_dirent(struct super_block *s, dnode_secno dno, int n, struct quad_buffer_head *qbh, struct dnode **dn) { int i; struct hpfs_dirent *de, *de_end; struct dnode *dnode; dnode = hpfs_map_dnode(s, dno, qbh); if (!dnode) return NULL; if (dn) *dn=dnode; de = dnode_first_de(dnode); de_end = dnode_end_de(dnode); for (i = 1; de < de_end; i++, de = de_next_de(de)) { if (i == n) { return de; } if (de->last) break; } hpfs_brelse4(qbh); hpfs_error(s, "map_nth_dirent: n too high; dnode = %08x, requested %08x", dno, n); return NULL; }
/*
 * hpfs_de_as_down_as_possible() - starting at @dno, follow the first
 * entry's down pointers until a dnode whose first entry has none, i.e.
 * the leftmost leaf; returns that dnode's number (or @d on cycle/@dno
 * on map failure). Verifies up pointers when sb_chk is set.
 */
dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; dnode_secno d = dno; dnode_secno up = 0; struct hpfs_dirent *de; int c1, c2 = 0; again: if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, d, &c1, &c2, "hpfs_de_as_down_as_possible")) return d; if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno; if (hpfs_sb(s)->sb_chk) if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up) hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up)); if (!de->down) { hpfs_brelse4(&qbh); return d; } up = d; d = de_down_pointer(de); hpfs_brelse4(&qbh); goto again; }
/*
 * map_pos_dirent() - map the dirent at directory position *posp and
 * advance *posp to the next position. A position packs the dnode number
 * in the high bits (dno = pos >> 6 << 2) and a 1-based entry index in the
 * low 6 bits (pos & 077). On error *posp is reset to 12 (an end marker
 * position — see the (loff_t)12 substitution in delete_empty_dnode).
 */
struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp, struct quad_buffer_head *qbh) { loff_t pos; unsigned c; dnode_secno dno; struct hpfs_dirent *de, *d; struct hpfs_dirent *up_de; struct hpfs_dirent *end_up_de; struct dnode *dnode; struct dnode *up_dnode; struct quad_buffer_head qbh0; pos = *posp; dno = pos >> 6 << 2; pos &= 077; if (!(de = map_nth_dirent(inode->i_sb, dno, pos, qbh, &dnode))) goto bail; /* Going to the next dirent */ if ((d = de_next_de(de)) <
dnode_end_de(dnode)) { if (!(++*posp & 077)) { hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08llx", (unsigned long long)*posp); goto bail; } /* We're going down the tree */ if (d->down) { *posp = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, de_down_pointer(d)) << 4) + 1; } return de; } /* Going up */ if (dnode->root_dnode) goto bail; if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0))) goto bail; end_up_de = dnode_end_de(up_dnode); c = 0; for (up_de = dnode_first_de(up_dnode); up_de < end_up_de; up_de = de_next_de(up_de)) { if (!(++c & 077)) hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up)); if (up_de->down && de_down_pointer(up_de) == dno) { *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c; hpfs_brelse4(&qbh0); return de; } } hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x", dno, le32_to_cpu(dnode->up)); hpfs_brelse4(&qbh0); bail: *posp = 12; return de; }
/*
 * map_dirent() - search the btree from dnode @dno for @name. On a match
 * returns the dirent with the buffer held in *qbh and, if @dd is non-NULL,
 * the containing dnode number in *dd; otherwise NULL with the buffer
 * released. Follows down pointers while the name sorts below the entry.
 */
/* Find a dirent in tree */ struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, const unsigned char *name, unsigned len, dnode_secno *dd, struct quad_buffer_head *qbh) { struct dnode *dnode; struct hpfs_dirent *de; struct hpfs_dirent *de_end; int c1, c2 = 0; if (!S_ISDIR(inode->i_mode)) hpfs_error(inode->i_sb, "map_dirent: not a directory\n"); again: if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, dno, &c1, &c2, "map_dirent")) return NULL; if (!(dnode = hpfs_map_dnode(inode->i_sb, dno, qbh))) return NULL; de_end = dnode_end_de(dnode); for (de = dnode_first_de(dnode); de < de_end; de = de_next_de(de)) { int t = hpfs_compare_names(inode->i_sb, name, len, de->name, de->namelen, de->last); if (!t) { if (dd) *dd = dno; return de; } if (t < 0) { if (de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); goto again; } break; } } hpfs_brelse4(qbh); return NULL; } /* * Remove empty directory.
In normal cases it is only one dnode with two * entries, but we must handle also such obscure cases when it's a tree * of empty dnodes. */
/*
 * hpfs_remove_dtree() - free the dnode tree of an empty directory rooted
 * at @dno. Walks down single-child dnodes, then frees the two subtrees
 * hanging off the first/last marker entries (d1, d2); any dnode holding a
 * real entry along the way means the directory was not empty — report
 * corruption via hpfs_error (still freeing the offending dnode).
 */
void hpfs_remove_dtree(struct super_block *s, dnode_secno dno) { struct quad_buffer_head qbh; struct dnode *dnode; struct hpfs_dirent *de; dnode_secno d1, d2, rdno = dno; while (1) { if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return; de = dnode_first_de(dnode); if (de->last) { if (de->down) d1 = de_down_pointer(de); else goto error; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); dno = d1; } else break; } if (!de->first) goto error; d1 = de->down ? de_down_pointer(de) : 0; de = de_next_de(de); if (!de->last) goto error; d2 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); do { while (d1) { if (!(dnode = hpfs_map_dnode(s, dno = d1, &qbh))) return; de = dnode_first_de(dnode); if (!de->last) goto error; d1 = de->down ? de_down_pointer(de) : 0; hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); } d1 = d2; d2 = 0; } while (d1); return; error: hpfs_brelse4(&qbh); hpfs_free_dnode(s, dno); hpfs_error(s, "directory %08x is corrupted or not empty", rdno); } /* * Find dirent for specified fnode. Use truncated 15-char name in fnode as * a help for searching.
*/
/*
 * map_fnode_dirent() - locate the dirent that references fnode @fno in its
 * parent directory. The fnode stores only the first 15 name characters, so
 * the search brackets candidates between name1 (the truncated prefix, lower
 * bound) and name2 (prefix padded with 0xff bytes, upper bound) and walks
 * the parent's dnode tree, checking de->fnode == fno at each entry.
 * Returns the dirent with the buffer held in *qbh, or NULL (buffer
 * released) on error or when no matching dirent exists.
 */
struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, struct fnode *f, struct quad_buffer_head *qbh) { unsigned char *name1; unsigned char *name2; int name1len, name2len; struct dnode *d; dnode_secno dno, downd; struct fnode *upf; struct buffer_head *bh; struct hpfs_dirent *de, *de_end; int c; int c1, c2 = 0; int d1, d2 = 0; name1 = f->name; if (!(name2 = kmalloc(256, GFP_NOFS))) { printk("HPFS: out of memory, can't map dirent\n"); return NULL; } if (f->len <= 15) memcpy(name2, name1, name1len = name2len = f->len); else { memcpy(name2, name1, 15); memset(name2 + 15, 0xff, 256 - 15); /*name2[15] = 0xff;*/ name1len = 15; name2len = 256; } if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) { kfree(name2); return NULL; } if (!fnode_is_dir(upf)) { brelse(bh); hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); kfree(name2); return NULL; } dno = le32_to_cpu(upf->u.external[0].disk_secno); brelse(bh); go_down: downd = 0; go_up: if (!(d = hpfs_map_dnode(s, dno, qbh))) { kfree(name2); return NULL; } de_end = dnode_end_de(d); de = dnode_first_de(d); if (downd) { while (de < de_end) { if (de->down) if (de_down_pointer(de) == downd) goto f; de = de_next_de(de); } hpfs_error(s, "pointer to dnode %08x not found in dnode %08x", downd, dno); hpfs_brelse4(qbh); kfree(name2); return NULL; } next_de: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name1, name1len, de->name, de->namelen, de->last); if (c < 0 && de->down) { dno = de_down_pointer(de); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, dno, &c1, &c2, "map_fnode_dirent #1")) { kfree(name2); return NULL; } goto go_down; } f: if (le32_to_cpu(de->fnode) == fno) { kfree(name2); return de; } c = hpfs_compare_names(s, name2, name2len, de->name, de->namelen, de->last); if (c < 0 && !de->last) goto not_found; if ((de = de_next_de(de)) < de_end) goto next_de; if (d->root_dnode) goto not_found; downd = dno; dno
= le32_to_cpu(d->up); hpfs_brelse4(qbh); if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) { kfree(name2); return NULL; } goto go_up; not_found: hpfs_brelse4(qbh); hpfs_error(s, "dirent for fnode %08x not found", fno); kfree(name2); return NULL; }
gpl-2.0